2024-12-06 10:16:39,707 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-06 10:16:39,719 main DEBUG Took 0.010578 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 10:16:39,720 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 10:16:39,720 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 10:16:39,721 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 10:16:39,722 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,729 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 10:16:39,741 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,743 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,743 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,744 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,744 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,744 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,745 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,746 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,747 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,747 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,748 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 10:16:39,749 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,749 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,749 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,750 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,751 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,751 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:16:39,752 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,752 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 10:16:39,754 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:16:39,755 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 10:16:39,757 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 10:16:39,757 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 10:16:39,759 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 10:16:39,759 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 10:16:39,768 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 10:16:39,770 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 10:16:39,772 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 10:16:39,772 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 10:16:39,772 main DEBUG createAppenders(={Console}) 2024-12-06 10:16:39,773 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-06 10:16:39,773 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-06 10:16:39,774 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-06 10:16:39,774 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 10:16:39,774 main DEBUG OutputStream closed 2024-12-06 10:16:39,775 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 10:16:39,775 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 10:16:39,775 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-06 10:16:39,845 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 10:16:39,847 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 10:16:39,848 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 10:16:39,849 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 10:16:39,850 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 10:16:39,850 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 10:16:39,850 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 10:16:39,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 10:16:39,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 10:16:39,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 10:16:39,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 10:16:39,852 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 10:16:39,852 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 10:16:39,852 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 10:16:39,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 10:16:39,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 10:16:39,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 10:16:39,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 10:16:39,856 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 10:16:39,857 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-06 10:16:39,857 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 10:16:39,858 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-06T10:16:40,094 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9 2024-12-06 10:16:40,096 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 10:16:40,097 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-06T10:16:40,106 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-06T10:16:40,126 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:16:40,129 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e, deleteOnExit=true 2024-12-06T10:16:40,129 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:16:40,130 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/test.cache.data in system properties and HBase conf 2024-12-06T10:16:40,130 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:16:40,131 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:16:40,131 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:16:40,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:16:40,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:16:40,228 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T10:16:40,332 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T10:16:40,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:16:40,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:16:40,337 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:16:40,337 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:16:40,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:16:40,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:16:40,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:16:40,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:16:40,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:16:40,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:16:40,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:16:40,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:16:40,341 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:16:40,341 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:16:41,188 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T10:16:41,264 INFO [Time-limited test {}] log.Log(170): Logging initialized @2342ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T10:16:41,352 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:41,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:16:41,468 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:16:41,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:16:41,470 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:16:41,484 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:41,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:16:41,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:16:41,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/java.io.tmpdir/jetty-localhost-41765-hadoop-hdfs-3_4_1-tests_jar-_-any-675128942931303165/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:16:41,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:41765} 2024-12-06T10:16:41,697 INFO [Time-limited test {}] server.Server(415): Started @2776ms 2024-12-06T10:16:42,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:42,179 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:16:42,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:16:42,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:16:42,181 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:16:42,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:16:42,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:16:42,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3054265c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/java.io.tmpdir/jetty-localhost-34737-hadoop-hdfs-3_4_1-tests_jar-_-any-12267280824954096345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:42,308 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@65902fec{HTTP/1.1, (http/1.1)}{localhost:34737} 2024-12-06T10:16:42,309 INFO [Time-limited test {}] server.Server(415): Started @3388ms 2024-12-06T10:16:42,368 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:16:42,869 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/dfs/data/data1/current/BP-1797330189-172.17.0.2-1733480200959/current, will proceed with Du for space computation calculation, 2024-12-06T10:16:42,869 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/dfs/data/data2/current/BP-1797330189-172.17.0.2-1733480200959/current, will proceed with Du for space computation calculation, 2024-12-06T10:16:42,913 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:16:42,968 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6bf7ea2ea3a9904b with lease ID 0x4e9a6e40c94dee48: Processing first storage report for DS-fcabb5ff-393b-4056-813b-f949da145545 from datanode DatanodeRegistration(127.0.0.1:37093, datanodeUuid=7a5359e7-667f-49eb-befc-2063e29c50e2, infoPort=43645, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=773323291;c=1733480200959) 2024-12-06T10:16:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bf7ea2ea3a9904b with lease ID 0x4e9a6e40c94dee48: from storage DS-fcabb5ff-393b-4056-813b-f949da145545 node DatanodeRegistration(127.0.0.1:37093, datanodeUuid=7a5359e7-667f-49eb-befc-2063e29c50e2, infoPort=43645, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=773323291;c=1733480200959), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:16:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6bf7ea2ea3a9904b with lease ID 0x4e9a6e40c94dee48: Processing first storage report for DS-b6844aed-26b6-4d38-8c18-37a06672f43d from datanode DatanodeRegistration(127.0.0.1:37093, datanodeUuid=7a5359e7-667f-49eb-befc-2063e29c50e2, infoPort=43645, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=773323291;c=1733480200959) 2024-12-06T10:16:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bf7ea2ea3a9904b with lease ID 0x4e9a6e40c94dee48: from storage DS-b6844aed-26b6-4d38-8c18-37a06672f43d node DatanodeRegistration(127.0.0.1:37093, datanodeUuid=7a5359e7-667f-49eb-befc-2063e29c50e2, infoPort=43645, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=773323291;c=1733480200959), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:16:43,060 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9 
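The StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1, ...} entry near the top of this log is printed by HBaseTestingUtility when the test harness boots the mini cluster. As a rough sketch only (class and method names below are illustrative, not the actual TestAcidGuaranteesWithAdaptivePolicy source), a setup that produces that line and the HDFS/ZooKeeper startup entries that follow looks approximately like this:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Illustrative sketch of the harness setup behind the
// "Starting up minicluster with option: StartMiniClusterOption{...}" entry above.
public class MiniClusterSetupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged option
        .numRegionServers(1)  // numRegionServers=1
        .numDataNodes(1)      // numDataNodes=1
        .numZkServers(1)      // numZkServers=1
        .build();
    util.startMiniCluster(option); // brings up DFS, MiniZooKeeperCluster, master and region server
    try {
      // test body would run against util.getConnection() here
    } finally {
      util.shutdownMiniCluster(); // tears the cluster back down
    }
  }
}

Everything from the datanode block reports above through the master and region-server startup below is driven by that single startMiniCluster call.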
2024-12-06T10:16:43,138 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/zookeeper_0, clientPort=61610, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:16:43,148 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=61610 2024-12-06T10:16:43,162 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:43,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:43,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:16:43,837 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 with version=8 2024-12-06T10:16:43,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/hbase-staging 2024-12-06T10:16:43,972 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T10:16:44,257 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:16:44,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:44,278 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:44,278 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:16:44,279 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:44,279 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:16:44,421 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:16:44,488 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T10:16:44,502 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T10:16:44,506 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:16:44,535 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 5827 (auto-detected) 2024-12-06T10:16:44,536 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T10:16:44,556 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33169 2024-12-06T10:16:44,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:44,567 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:44,581 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33169 connecting to ZooKeeper ensemble=127.0.0.1:61610 2024-12-06T10:16:44,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331690x0, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:16:44,619 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33169-0x10066d47c570000 connected 2024-12-06T10:16:44,652 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:16:44,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:44,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:16:44,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33169 2024-12-06T10:16:44,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33169 2024-12-06T10:16:44,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33169 2024-12-06T10:16:44,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33169 2024-12-06T10:16:44,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33169 
2024-12-06T10:16:44,677 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4, hbase.cluster.distributed=false 2024-12-06T10:16:44,744 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:16:44,744 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:44,744 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:44,745 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:16:44,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:44,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:16:44,747 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:16:44,750 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:16:44,751 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33397 2024-12-06T10:16:44,754 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:16:44,760 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:16:44,761 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:44,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:44,769 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33397 connecting to ZooKeeper ensemble=127.0.0.1:61610 2024-12-06T10:16:44,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333970x0, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:16:44,773 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33397-0x10066d47c570001 connected 2024-12-06T10:16:44,774 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:16:44,775 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33397-0x10066d47c570001, 
quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:44,776 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:16:44,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33397 2024-12-06T10:16:44,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33397 2024-12-06T10:16:44,778 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33397 2024-12-06T10:16:44,778 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33397 2024-12-06T10:16:44,779 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33397 2024-12-06T10:16:44,781 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,33169,1733480203965 2024-12-06T10:16:44,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:44,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:44,790 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,33169,1733480203965 2024-12-06T10:16:44,796 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:33169 2024-12-06T10:16:44,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:16:44,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:16:44,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:44,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:44,814 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:16:44,815 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:16:44,816 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,33169,1733480203965 from backup master directory 2024-12-06T10:16:44,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,33169,1733480203965 2024-12-06T10:16:44,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:44,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:44,819 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:16:44,820 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,33169,1733480203965 2024-12-06T10:16:44,822 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T10:16:44,823 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T10:16:44,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:16:45,296 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/hbase.id with ID: cd8411fc-7cfe-484d-bfcf-9959dc7ff3c6 2024-12-06T10:16:45,340 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:45,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:45,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:45,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:16:45,804 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:16:45,806 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:16:45,825 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:45,830 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T10:16:45,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:16:45,882 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store 2024-12-06T10:16:45,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:16:46,304 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
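The 'master:store' table descriptor logged above (column families info, proc, rs and state) is assembled internally by the master's MasterRegion/MasterRegionFactory code; the snippet below is only an illustration of what the logged attributes for the info family mean when expressed through the public HBase 2.x client API, not HBase source:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative reconstruction of the 'info' family attributes shown in the log:
// VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL, IN_MEMORY=true, BLOCKSIZE=8192.
class MasterStoreDescriptorSketch {
  static TableDescriptor sketch() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        // The proc, rs and state families logged above use VERSIONS=1, BLOOMFILTER=ROW,
        // BLOCKSIZE=65536 and no block encoding; they would be added the same way.
        .build();
  }
}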
2024-12-06T10:16:46,304 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:46,306 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:16:46,306 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:46,306 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:46,306 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:16:46,306 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:46,306 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:46,307 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:16:46,309 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/.initializing 2024-12-06T10:16:46,309 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/WALs/552d6a33fa09,33169,1733480203965 2024-12-06T10:16:46,316 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T10:16:46,327 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C33169%2C1733480203965, suffix=, logDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/WALs/552d6a33fa09,33169,1733480203965, archiveDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/oldWALs, maxLogs=10 2024-12-06T10:16:46,350 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/WALs/552d6a33fa09,33169,1733480203965/552d6a33fa09%2C33169%2C1733480203965.1733480206332, exclude list is [], retry=0 2024-12-06T10:16:46,367 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37093,DS-fcabb5ff-393b-4056-813b-f949da145545,DISK] 2024-12-06T10:16:46,370 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
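The java.lang.NoSuchMethodException stack trace earlier in this log ("No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396") is expected output, not a failure: FanOutOneBlockAsyncDFSOutputSaslHelper probes the installed Hadoop for an older DFSClient method via reflection and logs the miss at DEBUG before choosing a code path. A simplified sketch of that probe pattern (not the actual HBase source) looks like:

import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.DFSClient;

// Simplified illustration of the reflection probe whose miss produces the
// DEBUG-level NoSuchMethodException stack trace seen above.
final class TransparentCryptoProbeSketch {
  static boolean hasLegacyDecryptMethod() {
    try {
      DFSClient.class.getDeclaredMethod(
          "decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
      return true; // older Hadoop without HDFS-12396 exposes this method directly
    } catch (NoSuchMethodException e) {
      // Method absent: assume a Hadoop release that already contains HDFS-12396
      // and take the other code path; the exception itself is only logged at DEBUG.
      return false;
    }
  }
}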
2024-12-06T10:16:46,408 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/WALs/552d6a33fa09,33169,1733480203965/552d6a33fa09%2C33169%2C1733480203965.1733480206332 2024-12-06T10:16:46,409 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43645:43645)] 2024-12-06T10:16:46,410 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:46,410 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:46,414 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,415 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:16:46,485 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:46,488 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:46,489 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:16:46,492 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:46,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:46,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:16:46,496 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:46,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:46,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:16:46,501 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:46,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:46,506 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,507 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,515 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T10:16:46,519 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:46,523 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:46,524 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69791207, jitterRate=0.039970025420188904}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:16:46,528 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:16:46,529 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:16:46,558 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71c5ac83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:46,592 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-06T10:16:46,605 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:16:46,605 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:16:46,607 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T10:16:46,609 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T10:16:46,614 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-06T10:16:46,614 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:16:46,640 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T10:16:46,652 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:16:46,654 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:16:46,657 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:16:46,658 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:16:46,660 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:16:46,662 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:16:46,665 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:16:46,667 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:16:46,668 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:16:46,669 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:16:46,681 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:16:46,683 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:16:46,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:46,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:46,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:46,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:46,687 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,33169,1733480203965, sessionid=0x10066d47c570000, setting cluster-up flag (Was=false) 2024-12-06T10:16:46,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:46,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:46,706 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:16:46,708 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,33169,1733480203965 2024-12-06T10:16:46,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:46,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:46,720 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:16:46,722 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,33169,1733480203965 2024-12-06T10:16:46,795 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:33397 2024-12-06T10:16:46,796 INFO 
[RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1008): ClusterId : cd8411fc-7cfe-484d-bfcf-9959dc7ff3c6 2024-12-06T10:16:46,799 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:16:46,804 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:16:46,805 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:16:46,808 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:16:46,808 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:16:46,809 DEBUG [RS:0;552d6a33fa09:33397 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dc55a1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:46,813 DEBUG [RS:0;552d6a33fa09:33397 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23694f6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:16:46,814 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:16:46,817 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:16:46,817 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:16:46,817 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:16:46,817 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T10:16:46,820 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,33169,1733480203965 with isa=552d6a33fa09/172.17.0.2:33397, startcode=1733480204743 2024-12-06T10:16:46,823 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,33169,1733480203965 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:16:46,827 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:46,828 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:46,828 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:46,828 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:46,828 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:16:46,829 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,829 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:16:46,829 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,834 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733480236834 2024-12-06T10:16:46,835 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:16:46,836 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:16:46,836 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:16:46,837 DEBUG [RS:0;552d6a33fa09:33397 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:16:46,838 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:16:46,841 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:46,841 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:16:46,842 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:16:46,842 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:16:46,843 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:16:46,843 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:16:46,843 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-06T10:16:46,845 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:16:46,846 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:16:46,847 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:16:46,849 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:16:46,849 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:16:46,852 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480206851,5,FailOnTimeoutGroup] 2024-12-06T10:16:46,852 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480206852,5,FailOnTimeoutGroup] 2024-12-06T10:16:46,852 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:46,853 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:16:46,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:16:46,854 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:46,855 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-06T10:16:46,879 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38957, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:16:46,885 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33169 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:46,888 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33169 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:46,904 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:16:46,904 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40601 2024-12-06T10:16:46,904 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:16:46,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:16:46,909 DEBUG [RS:0;552d6a33fa09:33397 {}] zookeeper.ZKUtil(111): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,33397,1733480204743 2024-12-06T10:16:46,910 WARN [RS:0;552d6a33fa09:33397 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:16:46,910 INFO [RS:0;552d6a33fa09:33397 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T10:16:46,910 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743 2024-12-06T10:16:46,912 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,33397,1733480204743] 2024-12-06T10:16:46,926 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:16:46,941 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:16:46,959 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:16:46,963 INFO [RS:0;552d6a33fa09:33397 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:16:46,963 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T10:16:46,964 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:16:46,973 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:46,973 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,973 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,974 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,974 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,974 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,974 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:16:46,974 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,974 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,975 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,975 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,975 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:46,975 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:16:46,975 DEBUG [RS:0;552d6a33fa09:33397 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:16:46,976 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:46,977 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:46,977 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-06T10:16:46,977 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:46,977 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33397,1733480204743-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:16:46,998 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:16:47,000 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33397,1733480204743-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:47,027 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.Replication(204): 552d6a33fa09,33397,1733480204743 started 2024-12-06T10:16:47,027 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,33397,1733480204743, RpcServer on 552d6a33fa09/172.17.0.2:33397, sessionid=0x10066d47c570001 2024-12-06T10:16:47,028 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:16:47,028 DEBUG [RS:0;552d6a33fa09:33397 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:47,028 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,33397,1733480204743' 2024-12-06T10:16:47,029 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:16:47,030 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:16:47,031 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:16:47,031 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:16:47,031 DEBUG [RS:0;552d6a33fa09:33397 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:47,031 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,33397,1733480204743' 2024-12-06T10:16:47,031 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:16:47,032 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:16:47,033 DEBUG [RS:0;552d6a33fa09:33397 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:16:47,033 INFO [RS:0;552d6a33fa09:33397 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:16:47,033 INFO [RS:0;552d6a33fa09:33397 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-06T10:16:47,139 INFO [RS:0;552d6a33fa09:33397 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T10:16:47,143 INFO [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C33397%2C1733480204743, suffix=, logDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743, archiveDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/oldWALs, maxLogs=32 2024-12-06T10:16:47,160 DEBUG [RS:0;552d6a33fa09:33397 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743/552d6a33fa09%2C33397%2C1733480204743.1733480207145, exclude list is [], retry=0 2024-12-06T10:16:47,165 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37093,DS-fcabb5ff-393b-4056-813b-f949da145545,DISK] 2024-12-06T10:16:47,169 INFO [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743/552d6a33fa09%2C33397%2C1733480204743.1733480207145 2024-12-06T10:16:47,169 DEBUG [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43645:43645)] 2024-12-06T10:16:47,255 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:16:47,255 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:16:47,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741833_1009 (size=32) 2024-12-06T10:16:47,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:47,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:16:47,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:16:47,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:47,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:47,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:16:47,678 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:16:47,678 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:47,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:47,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:16:47,681 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:16:47,681 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:47,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:47,683 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740 2024-12-06T10:16:47,684 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740 2024-12-06T10:16:47,687 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T10:16:47,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:16:47,694 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:47,695 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74049978, jitterRate=0.10343065857887268}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:16:47,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:16:47,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:16:47,697 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:16:47,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:16:47,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:16:47,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:16:47,699 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:16:47,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:16:47,702 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:16:47,702 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T10:16:47,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T10:16:47,716 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T10:16:47,718 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T10:16:47,870 DEBUG [552d6a33fa09:33169 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:16:47,876 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:47,882 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,33397,1733480204743, state=OPENING 2024-12-06T10:16:47,891 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:16:47,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:47,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:47,894 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:47,894 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:47,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:16:48,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:48,095 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:16:48,099 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:16:48,110 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:16:48,110 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T10:16:48,110 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-06T10:16:48,114 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C33397%2C1733480204743.meta, suffix=.meta, logDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743, archiveDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/oldWALs, maxLogs=32 2024-12-06T10:16:48,131 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743/552d6a33fa09%2C33397%2C1733480204743.meta.1733480208116.meta, exclude list is [], retry=0 2024-12-06T10:16:48,135 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37093,DS-fcabb5ff-393b-4056-813b-f949da145545,DISK] 2024-12-06T10:16:48,139 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/WALs/552d6a33fa09,33397,1733480204743/552d6a33fa09%2C33397%2C1733480204743.meta.1733480208116.meta 2024-12-06T10:16:48,139 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:43645:43645)] 2024-12-06T10:16:48,140 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:48,141 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T10:16:48,201 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:16:48,205 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T10:16:48,210 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:16:48,210 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:48,210 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:16:48,210 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:16:48,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:16:48,215 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:16:48,215 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:48,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:48,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:16:48,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:16:48,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:48,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:48,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:16:48,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:16:48,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:48,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:48,223 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740 2024-12-06T10:16:48,226 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740 2024-12-06T10:16:48,229 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:16:48,232 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:16:48,234 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62868068, jitterRate=-0.06319278478622437}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:16:48,236 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:16:48,244 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733480208085 2024-12-06T10:16:48,255 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:16:48,256 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:16:48,257 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:48,259 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,33397,1733480204743, state=OPEN 2024-12-06T10:16:48,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:16:48,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:16:48,264 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:48,264 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:48,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:16:48,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,33397,1733480204743 in 368 msec 2024-12-06T10:16:48,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:16:48,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 562 msec 2024-12-06T10:16:48,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5180 sec 2024-12-06T10:16:48,280 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733480208280, completionTime=-1 2024-12-06T10:16:48,281 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:16:48,281 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:16:48,320 DEBUG [hconnection-0x68a406ef-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:48,323 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:48,334 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:16:48,334 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733480268334 2024-12-06T10:16:48,334 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733480328334 2024-12-06T10:16:48,334 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-12-06T10:16:48,358 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33169,1733480203965-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:48,358 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33169,1733480203965-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:48,358 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33169,1733480203965-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:48,360 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:33169, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:48,360 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:48,365 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:16:48,368 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T10:16:48,370 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:16:48,376 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:16:48,380 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:16:48,381 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:48,383 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:16:48,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:16:48,798 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4d3009c066fbf23693b61104c76d0d3b, NAME => 'hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:16:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:16:48,808 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:48,809 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4d3009c066fbf23693b61104c76d0d3b, disabling compactions & flushes 2024-12-06T10:16:48,809 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:16:48,809 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:16:48,809 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 
after waiting 0 ms 2024-12-06T10:16:48,809 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:16:48,809 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:16:48,809 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4d3009c066fbf23693b61104c76d0d3b: 2024-12-06T10:16:48,811 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:16:48,818 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733480208812"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480208812"}]},"ts":"1733480208812"} 2024-12-06T10:16:48,843 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:16:48,845 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:16:48,848 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480208845"}]},"ts":"1733480208845"} 2024-12-06T10:16:48,852 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:16:48,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4d3009c066fbf23693b61104c76d0d3b, ASSIGN}] 2024-12-06T10:16:48,861 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4d3009c066fbf23693b61104c76d0d3b, ASSIGN 2024-12-06T10:16:48,863 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4d3009c066fbf23693b61104c76d0d3b, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:16:49,014 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4d3009c066fbf23693b61104c76d0d3b, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:49,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4d3009c066fbf23693b61104c76d0d3b, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:16:49,172 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:49,179 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:16:49,179 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4d3009c066fbf23693b61104c76d0d3b, NAME => 'hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:49,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:49,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,182 INFO [StoreOpener-4d3009c066fbf23693b61104c76d0d3b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,185 INFO [StoreOpener-4d3009c066fbf23693b61104c76d0d3b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4d3009c066fbf23693b61104c76d0d3b columnFamilyName info 2024-12-06T10:16:49,185 DEBUG [StoreOpener-4d3009c066fbf23693b61104c76d0d3b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:49,186 INFO [StoreOpener-4d3009c066fbf23693b61104c76d0d3b-1 {}] regionserver.HStore(327): Store=4d3009c066fbf23693b61104c76d0d3b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:49,187 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,188 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,191 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:16:49,195 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:49,196 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4d3009c066fbf23693b61104c76d0d3b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70585101, jitterRate=0.051799967885017395}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:16:49,197 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4d3009c066fbf23693b61104c76d0d3b: 2024-12-06T10:16:49,199 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b., pid=6, masterSystemTime=1733480209172 2024-12-06T10:16:49,202 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:16:49,203 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 
2024-12-06T10:16:49,204 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4d3009c066fbf23693b61104c76d0d3b, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:49,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T10:16:49,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4d3009c066fbf23693b61104c76d0d3b, server=552d6a33fa09,33397,1733480204743 in 189 msec 2024-12-06T10:16:49,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T10:16:49,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4d3009c066fbf23693b61104c76d0d3b, ASSIGN in 352 msec 2024-12-06T10:16:49,215 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:16:49,216 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480209215"}]},"ts":"1733480209215"} 2024-12-06T10:16:49,218 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T10:16:49,222 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:16:49,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 851 msec 2024-12-06T10:16:49,280 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T10:16:49,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:16:49,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:49,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:49,312 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T10:16:49,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:16:49,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-12-06T10:16:49,336 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T10:16:49,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:16:49,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-06T10:16:49,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T10:16:49,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T10:16:49,365 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.545sec 2024-12-06T10:16:49,367 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:16:49,368 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:16:49,369 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:16:49,369 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:16:49,370 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:16:49,370 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33169,1733480203965-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:16:49,371 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33169,1733480203965-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:16:49,378 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:16:49,379 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:16:49,379 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33169,1733480203965-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
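Note: the two CreateNamespaceProcedure entries above ('default' and 'hbase') are issued internally by the master during startup, not by the test client. For reference only, a user namespace would be created through the client Admin API; the sketch below is illustrative, and the class and namespace names in it are hypothetical.

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class NamespaceExample {
      // Creates a user namespace; 'default' and 'hbase' come from the master itself.
      static void createUserNamespace(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("test_ns").build()); // hypothetical name
        }
      }
    }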
2024-12-06T10:16:49,399 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46873e4f to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e541e88 2024-12-06T10:16:49,399 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-06T10:16:49,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dc5535f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:49,410 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T10:16:49,410 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T10:16:49,420 DEBUG [hconnection-0x16235b7d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:49,428 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:49,437 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,33169,1733480203965 2024-12-06T10:16:49,482 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=189, ProcessCount=11, AvailableMemoryMB=7120 2024-12-06T10:16:49,500 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:16:49,507 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:16:49,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
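Note: the WARN from client.ZKConnectionRegistry above flags the ZooKeeper-based connection registry as deprecated. A minimal client-side sketch of switching to the RPC connection registry follows; it assumes an HBase 2.5+ client, and the configuration keys and class name should be checked against the book section referenced in the warning. The endpoint value is taken from this log's master address and is only an example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class RegistryExample {
      // Builds a client Connection without the deprecated ZKConnectionRegistry (assumes 2.5+ key names).
      static Connection connect() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", "552d6a33fa09:33169"); // example endpoint from this log
        return ConnectionFactory.createConnection(conf);
      }
    }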
2024-12-06T10:16:49,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:16:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T10:16:49,525 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:16:49,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-06T10:16:49,525 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:49,527 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:16:49,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:16:49,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741837_1013 (size=963) 2024-12-06T10:16:49,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:16:49,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:16:49,942 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:16:49,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741838_1014 (size=53) 2024-12-06T10:16:49,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:49,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b58170106b3730174deb9625aeac23df, disabling compactions & flushes 2024-12-06T10:16:49,954 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:49,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:49,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. after waiting 0 ms 2024-12-06T10:16:49,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:49,954 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:49,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:49,956 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:16:49,956 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733480209956"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480209956"}]},"ts":"1733480209956"} 2024-12-06T10:16:49,959 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
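Note: the create request above defines table TestAcidGuarantees with families A, B and C (VERSIONS=1, BLOCKSIZE=64KB) and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why each store later reports memstore type=CompactingMemStore with compactor=ADAPTIVE. A rough client-side equivalent is sketched below; it is illustrative only, and the small MEMSTORE_FLUSHSIZE (131072) is an assumption inferred from the TableDescriptorChecker warning earlier in this log rather than something shown in the descriptor dump.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class AdaptiveTableExample {
      // Approximates the descriptor shown in the log: ADAPTIVE in-memory compaction, families A/B/C.
      static void create(Connection conn) throws Exception {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            .setMemStoreFlushSize(131072L); // presumed source of the "too small" warning above
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)         // VERSIONS => '1'
              .setBlocksize(64 * 1024)   // BLOCKSIZE => '65536 B (64KB)'
              .build());
        }
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(tdb.build());
        }
      }
    }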
2024-12-06T10:16:49,961 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:16:49,961 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480209961"}]},"ts":"1733480209961"} 2024-12-06T10:16:49,964 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T10:16:49,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, ASSIGN}] 2024-12-06T10:16:49,970 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, ASSIGN 2024-12-06T10:16:49,972 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:16:50,122 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=b58170106b3730174deb9625aeac23df, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:16:50,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:16:50,279 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,286 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:50,286 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:50,287 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,287 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:50,287 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,287 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,290 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,294 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:16:50,294 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b58170106b3730174deb9625aeac23df columnFamilyName A 2024-12-06T10:16:50,294 DEBUG [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:50,296 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.HStore(327): Store=b58170106b3730174deb9625aeac23df/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:50,296 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,298 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:16:50,299 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b58170106b3730174deb9625aeac23df columnFamilyName B 2024-12-06T10:16:50,299 DEBUG [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:50,300 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.HStore(327): Store=b58170106b3730174deb9625aeac23df/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:50,300 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,302 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:16:50,302 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b58170106b3730174deb9625aeac23df columnFamilyName C 2024-12-06T10:16:50,302 DEBUG [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:50,303 INFO [StoreOpener-b58170106b3730174deb9625aeac23df-1 {}] regionserver.HStore(327): Store=b58170106b3730174deb9625aeac23df/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:50,304 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:50,305 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,306 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,308 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:16:50,311 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,314 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:50,315 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened b58170106b3730174deb9625aeac23df; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71649479, jitterRate=0.06766043603420258}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:16:50,316 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:50,318 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., pid=11, masterSystemTime=1733480210279 2024-12-06T10:16:50,321 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:50,321 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:50,322 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=b58170106b3730174deb9625aeac23df, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T10:16:50,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 in 198 msec 2024-12-06T10:16:50,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T10:16:50,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, ASSIGN in 358 msec 2024-12-06T10:16:50,332 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:16:50,332 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480210332"}]},"ts":"1733480210332"} 2024-12-06T10:16:50,335 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T10:16:50,339 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:16:50,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 818 msec 2024-12-06T10:16:50,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:16:50,646 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-06T10:16:50,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fdf5682 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f6e36fe 2024-12-06T10:16:50,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e98ea32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,657 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,659 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,662 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:16:50,664 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44026, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:16:50,672 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d38d10 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f343a4d 2024-12-06T10:16:50,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12885408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c63ae4e to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22cb07dd 2024-12-06T10:16:50,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c43377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,682 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x736f1673 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@478bae6b 2024-12-06T10:16:50,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4977266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,686 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ee2166f to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5400112e 2024-12-06T10:16:50,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8f4734, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,690 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f34ff67 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38766d64 2024-12-06T10:16:50,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18603bb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b5cad1a to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@295cb1ac 2024-12-06T10:16:50,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e97e4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,703 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c3b736e to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70267494 2024-12-06T10:16:50,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490457fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x767a8485 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d2a8e08 2024-12-06T10:16:50,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c8de680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,712 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6502d571 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c915d17 2024-12-06T10:16:50,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f6b07e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:50,719 DEBUG [hconnection-0x1734c206-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,720 DEBUG [hconnection-0x3a31c907-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,722 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51524, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,724 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,724 DEBUG [hconnection-0x6c4136b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,727 DEBUG [hconnection-0x2d164ddf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:16:50,732 DEBUG [hconnection-0x4fac3183-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-06T10:16:50,736 DEBUG [hconnection-0x676daafa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:16:50,739 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:16:50,741 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51536, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,741 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:16:50,742 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:16:50,760 DEBUG [hconnection-0x169ff269-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,762 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,770 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,774 DEBUG [hconnection-0x7e6d3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,774 DEBUG [hconnection-0xc43284f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:50,777 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,782 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,785 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51604, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:50,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:16:50,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:50,810 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:50,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:50,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:50,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:50,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:50,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:50,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:16:50,910 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T10:16:50,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:50,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:50,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:50,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:16:50,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:50,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/1231fe29209a426188f997b44fd54d78 is 50, key is test_row_0/A:col10/1733480210793/Put/seqid=0 2024-12-06T10:16:50,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:50,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:50,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480270976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:50,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480270978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480270979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480270986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:50,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:50,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480270986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741839_1015 (size=16681) 2024-12-06T10:16:51,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/1231fe29209a426188f997b44fd54d78 2024-12-06T10:16:51,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:16:51,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T10:16:51,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:51,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/76f1f07f7cdb4edd8f1b105cb20b08e3 is 50, key is test_row_0/B:col10/1733480210793/Put/seqid=0 2024-12-06T10:16:51,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480271133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480271134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480271134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480271135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480271134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741840_1016 (size=12001) 2024-12-06T10:16:51,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/76f1f07f7cdb4edd8f1b105cb20b08e3 2024-12-06T10:16:51,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/d609142fb37b4be78d6049eab52474d4 is 50, key is test_row_0/C:col10/1733480210793/Put/seqid=0 2024-12-06T10:16:51,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741841_1017 (size=12001) 2024-12-06T10:16:51,264 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=13 2024-12-06T10:16:51,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:51,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:16:51,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:16:51,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480271347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480271348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480271348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480271350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480271351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T10:16:51,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:51,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T10:16:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:51,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:51,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/d609142fb37b4be78d6049eab52474d4 2024-12-06T10:16:51,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480271654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480271655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480271656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480271657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:51,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480271662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/1231fe29209a426188f997b44fd54d78 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1231fe29209a426188f997b44fd54d78 2024-12-06T10:16:51,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1231fe29209a426188f997b44fd54d78, entries=250, sequenceid=13, filesize=16.3 K 2024-12-06T10:16:51,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/76f1f07f7cdb4edd8f1b105cb20b08e3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3 2024-12-06T10:16:51,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T10:16:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/d609142fb37b4be78d6049eab52474d4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d609142fb37b4be78d6049eab52474d4 2024-12-06T10:16:51,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d609142fb37b4be78d6049eab52474d4, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T10:16:51,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for b58170106b3730174deb9625aeac23df in 940ms, sequenceid=13, compaction requested=false 2024-12-06T10:16:51,742 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-06T10:16:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:51,745 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:51,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T10:16:51,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:51,747 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:16:51,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:51,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:51,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:51,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:51,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:51,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:51,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/e853622d9a2a4fd785b1d3f1b013a4dc is 50, key is test_row_0/A:col10/1733480210981/Put/seqid=0 2024-12-06T10:16:51,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741842_1018 (size=12001) 2024-12-06T10:16:51,809 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/e853622d9a2a4fd785b1d3f1b013a4dc 2024-12-06T10:16:51,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/fd118e428104484ea0db3da9294162d9 is 50, key is test_row_0/B:col10/1733480210981/Put/seqid=0 2024-12-06T10:16:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:16:51,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741843_1019 (size=12001) 2024-12-06T10:16:51,883 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/fd118e428104484ea0db3da9294162d9 2024-12-06T10:16:51,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/edd3d87ad0274960b0e41ac2022ecc87 is 50, key is test_row_0/C:col10/1733480210981/Put/seqid=0 2024-12-06T10:16:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741844_1020 (size=12001) 2024-12-06T10:16:51,947 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/edd3d87ad0274960b0e41ac2022ecc87 2024-12-06T10:16:51,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/e853622d9a2a4fd785b1d3f1b013a4dc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/e853622d9a2a4fd785b1d3f1b013a4dc 2024-12-06T10:16:51,984 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/e853622d9a2a4fd785b1d3f1b013a4dc, entries=150, sequenceid=38, filesize=11.7 K 2024-12-06T10:16:51,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/fd118e428104484ea0db3da9294162d9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fd118e428104484ea0db3da9294162d9 2024-12-06T10:16:52,003 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fd118e428104484ea0db3da9294162d9, entries=150, sequenceid=38, filesize=11.7 K 2024-12-06T10:16:52,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/edd3d87ad0274960b0e41ac2022ecc87 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/edd3d87ad0274960b0e41ac2022ecc87 2024-12-06T10:16:52,022 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/edd3d87ad0274960b0e41ac2022ecc87, entries=150, sequenceid=38, filesize=11.7 K 2024-12-06T10:16:52,024 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for b58170106b3730174deb9625aeac23df in 277ms, sequenceid=38, compaction requested=false 2024-12-06T10:16:52,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:52,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:52,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-06T10:16:52,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-06T10:16:52,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-06T10:16:52,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2800 sec 2024-12-06T10:16:52,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.3020 sec 2024-12-06T10:16:52,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:16:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:52,219 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fb3cfed1a8dc4b6699125f8bb94ccc2f is 50, key is test_row_0/A:col10/1733480212182/Put/seqid=0 2024-12-06T10:16:52,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741845_1021 (size=16681) 2024-12-06T10:16:52,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fb3cfed1a8dc4b6699125f8bb94ccc2f 2024-12-06T10:16:52,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/590dcd3497664918aeef83bf8b7086eb is 50, key is test_row_0/B:col10/1733480212182/Put/seqid=0 2024-12-06T10:16:52,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480272274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480272274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480272281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480272282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480272284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741846_1022 (size=12001) 2024-12-06T10:16:52,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/590dcd3497664918aeef83bf8b7086eb 2024-12-06T10:16:52,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/442b24b635cd4d75b3bd4180ba7497e6 is 50, key is test_row_0/C:col10/1733480212182/Put/seqid=0 2024-12-06T10:16:52,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741847_1023 (size=12001) 2024-12-06T10:16:52,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/442b24b635cd4d75b3bd4180ba7497e6 2024-12-06T10:16:52,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fb3cfed1a8dc4b6699125f8bb94ccc2f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fb3cfed1a8dc4b6699125f8bb94ccc2f 2024-12-06T10:16:52,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fb3cfed1a8dc4b6699125f8bb94ccc2f, entries=250, sequenceid=49, filesize=16.3 K 2024-12-06T10:16:52,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/590dcd3497664918aeef83bf8b7086eb as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/590dcd3497664918aeef83bf8b7086eb 2024-12-06T10:16:52,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/590dcd3497664918aeef83bf8b7086eb, entries=150, sequenceid=49, filesize=11.7 K 2024-12-06T10:16:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/442b24b635cd4d75b3bd4180ba7497e6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/442b24b635cd4d75b3bd4180ba7497e6 2024-12-06T10:16:52,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480272388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480272389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480272399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480272399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480272401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/442b24b635cd4d75b3bd4180ba7497e6, entries=150, sequenceid=49, filesize=11.7 K 2024-12-06T10:16:52,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b58170106b3730174deb9625aeac23df in 229ms, sequenceid=49, compaction requested=true 2024-12-06T10:16:52,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:16:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:16:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:52,424 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:52,424 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:52,428 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files 
of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:52,430 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:16:52,430 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:52,430 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fd118e428104484ea0db3da9294162d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/590dcd3497664918aeef83bf8b7086eb] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=35.2 K 2024-12-06T10:16:52,432 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 76f1f07f7cdb4edd8f1b105cb20b08e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480210750 2024-12-06T10:16:52,433 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting fd118e428104484ea0db3da9294162d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733480210972 2024-12-06T10:16:52,433 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45363 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:52,434 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:16:52,434 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 590dcd3497664918aeef83bf8b7086eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733480212178 2024-12-06T10:16:52,434 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
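The "Over memstore limit=512.0 K" rejections earlier in this log are the region blocking writes in HRegion.checkResources once its memstore grows past the blocking size, which (as I understand these settings) is derived from the configured flush size times the block multiplier; the tiny 512 K figure here presumably comes from the test's deliberately small values. A minimal sketch of the two settings involved, using illustrative numbers rather than the ones this test run actually uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Blocking size is roughly flush size * block multiplier; the values
            // below are illustrative defaults-style numbers, not this test's.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // 128 MB
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("approx. blocking memstore size = " + blocking + " bytes");
        }
    }
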
2024-12-06T10:16:52,434 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1231fe29209a426188f997b44fd54d78, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/e853622d9a2a4fd785b1d3f1b013a4dc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fb3cfed1a8dc4b6699125f8bb94ccc2f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=44.3 K 2024-12-06T10:16:52,436 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1231fe29209a426188f997b44fd54d78, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480210750 2024-12-06T10:16:52,437 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e853622d9a2a4fd785b1d3f1b013a4dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733480210972 2024-12-06T10:16:52,438 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb3cfed1a8dc4b6699125f8bb94ccc2f, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733480212172 2024-12-06T10:16:52,479 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#9 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:52,482 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/76936e39d6634f87b15c3dae405e3873 is 50, key is test_row_0/B:col10/1733480212182/Put/seqid=0 2024-12-06T10:16:52,498 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#10 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:52,500 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/c83210370f75493ea2bcbe5fa160cf0a is 50, key is test_row_0/A:col10/1733480212182/Put/seqid=0 2024-12-06T10:16:52,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741848_1024 (size=12104) 2024-12-06T10:16:52,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741849_1025 (size=12104) 2024-12-06T10:16:52,533 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/76936e39d6634f87b15c3dae405e3873 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76936e39d6634f87b15c3dae405e3873 2024-12-06T10:16:52,558 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 76936e39d6634f87b15c3dae405e3873(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:52,558 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:52,558 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480212423; duration=0sec 2024-12-06T10:16:52,559 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:52,559 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:16:52,559 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:52,567 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:52,567 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:16:52,567 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
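The per-family compactions above (stores B, A and then C of region b58170106b3730174deb9625aeac23df) are queued automatically by the region server's CompactSplit threads after the flushes. For reference, the same kind of compaction can also be requested explicitly through the Admin API; a minimal sketch, assuming a reachable cluster and taking the table and family names from the log (the server still chooses which files to compact, as it does above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the region server to queue a compaction of family B of the test table.
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
            }
        }
    }
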
2024-12-06T10:16:52,567 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d609142fb37b4be78d6049eab52474d4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/edd3d87ad0274960b0e41ac2022ecc87, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/442b24b635cd4d75b3bd4180ba7497e6] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=35.2 K 2024-12-06T10:16:52,569 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d609142fb37b4be78d6049eab52474d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480210750 2024-12-06T10:16:52,570 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting edd3d87ad0274960b0e41ac2022ecc87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733480210972 2024-12-06T10:16:52,574 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 442b24b635cd4d75b3bd4180ba7497e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733480212178 2024-12-06T10:16:52,601 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#11 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:52,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/dfabba2d3f4741c09080e57da54f3a1e is 50, key is test_row_0/C:col10/1733480212182/Put/seqid=0 2024-12-06T10:16:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:52,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:16:52,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:52,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:52,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:52,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741850_1026 (size=12104) 2024-12-06T10:16:52,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/45917a5bc9dd488db25c8d4d4769a9f5 is 50, key is test_row_0/A:col10/1733480212268/Put/seqid=0 2024-12-06T10:16:52,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480272637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480272634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480272642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480272655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480272658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741851_1027 (size=12001) 2024-12-06T10:16:52,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/45917a5bc9dd488db25c8d4d4769a9f5 2024-12-06T10:16:52,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/6140b90b5d5341be9ea2131a7ccf3848 is 50, key is test_row_0/B:col10/1733480212268/Put/seqid=0 2024-12-06T10:16:52,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741852_1028 (size=12001) 2024-12-06T10:16:52,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/6140b90b5d5341be9ea2131a7ccf3848 2024-12-06T10:16:52,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3f61562fe0a145a7b909bbc0047ef1c6 is 50, key is test_row_0/C:col10/1733480212268/Put/seqid=0 2024-12-06T10:16:52,764 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480272761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480272762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480272762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480272763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480272764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:52,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741853_1029 (size=12001) 2024-12-06T10:16:52,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3f61562fe0a145a7b909bbc0047ef1c6 2024-12-06T10:16:52,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/45917a5bc9dd488db25c8d4d4769a9f5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/45917a5bc9dd488db25c8d4d4769a9f5 2024-12-06T10:16:52,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/45917a5bc9dd488db25c8d4d4769a9f5, entries=150, sequenceid=76, filesize=11.7 K 2024-12-06T10:16:52,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/6140b90b5d5341be9ea2131a7ccf3848 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6140b90b5d5341be9ea2131a7ccf3848 2024-12-06T10:16:52,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6140b90b5d5341be9ea2131a7ccf3848, entries=150, sequenceid=76, filesize=11.7 K 2024-12-06T10:16:52,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3f61562fe0a145a7b909bbc0047ef1c6 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3f61562fe0a145a7b909bbc0047ef1c6 2024-12-06T10:16:52,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3f61562fe0a145a7b909bbc0047ef1c6, entries=150, sequenceid=76, filesize=11.7 K 2024-12-06T10:16:52,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b58170106b3730174deb9625aeac23df in 220ms, sequenceid=76, compaction requested=false 2024-12-06T10:16:52,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:52,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:16:52,850 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-06T10:16:52,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:16:52,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-06T10:16:52,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T10:16:52,856 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:16:52,858 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:16:52,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:16:52,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T10:16:52,934 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T10:16:52,939 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/c83210370f75493ea2bcbe5fa160cf0a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c83210370f75493ea2bcbe5fa160cf0a 2024-12-06T10:16:52,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=14 2024-12-06T10:16:52,961 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into c83210370f75493ea2bcbe5fa160cf0a(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:52,961 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:52,961 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480212421; duration=0sec 2024-12-06T10:16:52,962 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:52,962 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:16:52,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:52,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:16:52,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:52,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:52,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:52,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:52,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/d44069da13fd43bf84288f031064c82c is 50, key is test_row_1/A:col10/1733480212971/Put/seqid=0 2024-12-06T10:16:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741854_1030 (size=11997) 2024-12-06T10:16:53,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/d44069da13fd43bf84288f031064c82c 2024-12-06T10:16:53,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
552d6a33fa09,33397,1733480204743 2024-12-06T10:16:53,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T10:16:53,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:53,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:53,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:53,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:53,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
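Throughout this stretch of the log the RPC handlers keep rejecting Mutate calls with RegionTooBusyException while the memstore sits over its blocking limit, so the writer is expected to back off and retry until the flush catches up (the stock client retry logic normally hides this; the concurrent test here surfaces the raw exception). A minimal stand-alone sketch of that retry pattern, reusing the row, family and qualifier names seen in the log and a hypothetical value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));           // row name from the log
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), // family/qualifier from the log
                        Bytes.toBytes("value"));                          // hypothetical value
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException e) {
                        // Region is over its memstore blocking limit; wait for the flush
                        // to drain it and retry with a simple linear backoff.
                        Thread.sleep(100L * attempt);
                    }
                }
            }
        }
    }
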
2024-12-06T10:16:53,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
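The FlushRegionCallable above fails with "Unable to complete flush" simply because the region is already flushing when the procedure reaches it; the same pid=15 is re-dispatched moments later, as the entries below show. The table-level flush the test drives through HBaseAdmin can be issued like this; a minimal sketch, assuming cluster connectivity:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Request a flush of all regions of the test table, mirroring the
                // "Operation: FLUSH, Table Name: default:TestAcidGuarantees" line above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
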
2024-12-06T10:16:53,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/9d7e4d043abe482f91f20f6ed1f3dc86 is 50, key is test_row_1/B:col10/1733480212971/Put/seqid=0 2024-12-06T10:16:53,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741855_1031 (size=9657) 2024-12-06T10:16:53,047 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/dfabba2d3f4741c09080e57da54f3a1e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/dfabba2d3f4741c09080e57da54f3a1e 2024-12-06T10:16:53,074 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into dfabba2d3f4741c09080e57da54f3a1e(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:53,074 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:53,074 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480212423; duration=0sec 2024-12-06T10:16:53,075 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:53,075 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:16:53,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480273100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:53,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480273103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:53,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480273105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:53,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480273108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:53,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480273111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-06T10:16:53,165 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-06T10:16:53,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:16:53,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480273215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480273215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480273217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480273217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480273217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-06T10:16:53,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:16:53,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480273423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480273423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480273424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480273425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480273426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/9d7e4d043abe482f91f20f6ed1f3dc86
2024-12-06T10:16:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-06T10:16:53,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/af8acf9e74a941aebc71081748d9414a is 50, key is test_row_1/C:col10/1733480212971/Put/seqid=0
2024-12-06T10:16:53,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-06T10:16:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:16:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741856_1032 (size=9657)
2024-12-06T10:16:53,629 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-06T10:16:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:16:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:16:53,726 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-06T10:16:53,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480273729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480273731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480273732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480273732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:16:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480273733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,786 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:16:53,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-06T10:16:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:16:53,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:16:53,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:53,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:53,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/af8acf9e74a941aebc71081748d9414a 2024-12-06T10:16:53,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/d44069da13fd43bf84288f031064c82c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/d44069da13fd43bf84288f031064c82c 2024-12-06T10:16:53,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/d44069da13fd43bf84288f031064c82c, entries=150, sequenceid=88, filesize=11.7 K 2024-12-06T10:16:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/9d7e4d043abe482f91f20f6ed1f3dc86 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/9d7e4d043abe482f91f20f6ed1f3dc86 2024-12-06T10:16:53,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:53,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T10:16:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:53,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
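The RegionTooBusyException records above ("Over memstore limit=512.0 K") are HRegion.checkResources() rejecting writes while the region's memstore sits above its blocking limit; the condition is retryable, and the stock HBase client already backs off and retries it internally. The sketch below writes the same pattern out explicitly against the TestAcidGuarantees table seen in this log; the row, value, attempt count, and backoff are illustrative assumptions, not values taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      long backoffMs = 100;                      // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                        // the stock client also retries this internally
          break;
        } catch (RegionTooBusyException busy) {  // memstore above its blocking limit, as in the log
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```

In practice the client's own retry settings (hbase.client.retries.number, hbase.client.pause) usually cover this; the explicit loop is only meant to make the failure mode in these records visible.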
2024-12-06T10:16:53,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
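The 512.0 K figure in those exceptions is the per-region memstore blocking limit, which in a standard deployment is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; this test evidently runs with a far smaller flush size than the default so the limit is reached quickly. A sketch of the relevant settings, using the usual defaults rather than the test's values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (default 128 MB; this test clearly uses far less).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes fail with RegionTooBusyException once the memstore reaches flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("writes block above roughly " + blockingLimit + " bytes per region");
  }
}
```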
2024-12-06T10:16:53,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/9d7e4d043abe482f91f20f6ed1f3dc86, entries=100, sequenceid=88, filesize=9.4 K 2024-12-06T10:16:53,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/af8acf9e74a941aebc71081748d9414a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/af8acf9e74a941aebc71081748d9414a 2024-12-06T10:16:53,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/af8acf9e74a941aebc71081748d9414a, entries=100, sequenceid=88, filesize=9.4 K 2024-12-06T10:16:53,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b58170106b3730174deb9625aeac23df in 988ms, sequenceid=88, compaction requested=true 2024-12-06T10:16:53,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:53,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:53,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:53,963 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:53,963 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:53,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T10:16:53,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:16:53,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:53,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:16:53,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:53,967 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has 
selected 3 files of size 36102 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:53,967 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:16:53,967 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:53,967 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c83210370f75493ea2bcbe5fa160cf0a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/45917a5bc9dd488db25c8d4d4769a9f5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/d44069da13fd43bf84288f031064c82c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=35.3 K 2024-12-06T10:16:53,968 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:53,968 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:16:53,968 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c83210370f75493ea2bcbe5fa160cf0a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733480212178 2024-12-06T10:16:53,968 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
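The selection logged above ("selected 3 files of size 36102 ... 1 permutations with 1 in ratio") comes from ExploringCompactionPolicy, whose candidate sets must pass a size-ratio test: no file may be larger than the ratio times the combined size of the other files in the set. The sketch below is a simplified stand-in for that check only, not HBase's policy code; the sizes approximate the three A-family files (they sum to 36102 bytes) and 1.2 is the stock ratio.

```java
import java.util.List;

public class CompactionRatioCheck {
  // True if every file is no larger than ratio * (sum of the other files) - the core
  // admission rule a candidate set has to satisfy before it can be picked.
  static boolean satisfiesRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-family store files selected above (sum = 36102 bytes).
    List<Long> candidate = List.of(12_100L, 12_001L, 12_001L);
    System.out.println(satisfiesRatio(candidate, 1.2)); // 1.2 is the usual default ratio
  }
}
```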
2024-12-06T10:16:53,969 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76936e39d6634f87b15c3dae405e3873, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6140b90b5d5341be9ea2131a7ccf3848, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/9d7e4d043abe482f91f20f6ed1f3dc86] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=33.0 K 2024-12-06T10:16:53,970 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45917a5bc9dd488db25c8d4d4769a9f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733480212268 2024-12-06T10:16:53,971 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 76936e39d6634f87b15c3dae405e3873, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733480212178 2024-12-06T10:16:53,971 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting d44069da13fd43bf84288f031064c82c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733480212637 2024-12-06T10:16:53,973 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6140b90b5d5341be9ea2131a7ccf3848, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733480212268 2024-12-06T10:16:53,974 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d7e4d043abe482f91f20f6ed1f3dc86, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733480212637 2024-12-06T10:16:54,003 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#19 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:54,004 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/4c74bf0f893b48b381eea9651b1fde7a is 50, key is test_row_0/B:col10/1733480212268/Put/seqid=0 2024-12-06T10:16:54,006 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#18 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:54,006 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0fa2abd1dd764753b86c8980f3a423fa is 50, key is test_row_0/A:col10/1733480212268/Put/seqid=0 2024-12-06T10:16:54,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741858_1034 (size=12207) 2024-12-06T10:16:54,046 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0fa2abd1dd764753b86c8980f3a423fa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0fa2abd1dd764753b86c8980f3a423fa 2024-12-06T10:16:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741857_1033 (size=12207) 2024-12-06T10:16:54,068 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/4c74bf0f893b48b381eea9651b1fde7a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4c74bf0f893b48b381eea9651b1fde7a 2024-12-06T10:16:54,076 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 0fa2abd1dd764753b86c8980f3a423fa(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:16:54,076 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:54,076 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480213963; duration=0sec 2024-12-06T10:16:54,076 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:54,076 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:16:54,076 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:54,079 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:54,079 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:16:54,080 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:54,080 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/dfabba2d3f4741c09080e57da54f3a1e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3f61562fe0a145a7b909bbc0047ef1c6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/af8acf9e74a941aebc71081748d9414a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=33.0 K 2024-12-06T10:16:54,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfabba2d3f4741c09080e57da54f3a1e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733480212178 2024-12-06T10:16:54,082 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f61562fe0a145a7b909bbc0047ef1c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733480212268 2024-12-06T10:16:54,082 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 4c74bf0f893b48b381eea9651b1fde7a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
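Each store becomes a compaction candidate here because it has reached three eligible files ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). Those thresholds are ordinary configuration; the sketch below lists the knobs with their usual defaults, which happen to match what this log reports, but none of the values are read from the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered (default 3).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may rewrite (default 10).
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used when judging candidate sets (default 1.2).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store file count at which further flushes are blocked (the "16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}
```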
2024-12-06T10:16:54,082 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:54,082 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480213963; duration=0sec 2024-12-06T10:16:54,083 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:54,083 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:16:54,084 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting af8acf9e74a941aebc71081748d9414a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733480212637 2024-12-06T10:16:54,096 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T10:16:54,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:54,098 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:16:54,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:54,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:54,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:54,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:54,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:54,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:54,103 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#20 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:54,104 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/43782ec9c51c477a8cb7b11e6a39250e is 50, key is test_row_0/C:col10/1733480212268/Put/seqid=0 2024-12-06T10:16:54,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/c20b4e29514d40cbbfbde11c803d5472 is 50, key is test_row_0/A:col10/1733480213101/Put/seqid=0 2024-12-06T10:16:54,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741860_1036 (size=12207) 2024-12-06T10:16:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741859_1035 (size=12001) 2024-12-06T10:16:54,134 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/c20b4e29514d40cbbfbde11c803d5472 2024-12-06T10:16:54,148 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/43782ec9c51c477a8cb7b11e6a39250e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/43782ec9c51c477a8cb7b11e6a39250e 2024-12-06T10:16:54,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/d4558731ae29441b98c9af94346c0a30 is 50, key is test_row_0/B:col10/1733480213101/Put/seqid=0 2024-12-06T10:16:54,163 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 43782ec9c51c477a8cb7b11e6a39250e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
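The pid=15 FlushRegionCallable procedure interleaved with these compactions is the region-server side of a flush request. From a client, the equivalent operations can be requested through the Admin API; a minimal sketch against the table from this log, with error handling omitted:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);        // ask every region of the table to flush its memstores
      admin.majorCompact(table); // then rewrite each store down to a single file
    }
  }
}
```

Whether Admin.flush is routed through a master-side procedure (as the pid=15 records here suggest) or sent directly to the region servers depends on the HBase version, but the client call is the same.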
2024-12-06T10:16:54,163 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:54,163 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480213964; duration=0sec 2024-12-06T10:16:54,163 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:54,163 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:16:54,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741861_1037 (size=12001) 2024-12-06T10:16:54,209 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/d4558731ae29441b98c9af94346c0a30 2024-12-06T10:16:54,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:54,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:54,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/2faf0738048046d4b38f7841088e19df is 50, key is test_row_0/C:col10/1733480213101/Put/seqid=0 2024-12-06T10:16:54,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480274246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480274250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480274250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480274253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480274253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741862_1038 (size=12001) 2024-12-06T10:16:54,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480274355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480274359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480274359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480274360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480274362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T10:16:54,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T10:16:54,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T10:16:54,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-06T10:16:54,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:16:54,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T10:16:54,489 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T10:16:54,489 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-06T10:16:54,490 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-06T10:16:54,490 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-06T10:16:54,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480274561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480274565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480274568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480274569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480274570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,697 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/2faf0738048046d4b38f7841088e19df 2024-12-06T10:16:54,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/c20b4e29514d40cbbfbde11c803d5472 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c20b4e29514d40cbbfbde11c803d5472 2024-12-06T10:16:54,728 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c20b4e29514d40cbbfbde11c803d5472, entries=150, sequenceid=117, filesize=11.7 K 2024-12-06T10:16:54,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/d4558731ae29441b98c9af94346c0a30 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d4558731ae29441b98c9af94346c0a30 2024-12-06T10:16:54,743 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d4558731ae29441b98c9af94346c0a30, entries=150, sequenceid=117, filesize=11.7 K 2024-12-06T10:16:54,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/2faf0738048046d4b38f7841088e19df as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/2faf0738048046d4b38f7841088e19df 2024-12-06T10:16:54,761 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/2faf0738048046d4b38f7841088e19df, entries=150, sequenceid=117, filesize=11.7 K 2024-12-06T10:16:54,763 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b58170106b3730174deb9625aeac23df in 666ms, sequenceid=117, compaction requested=false 2024-12-06T10:16:54,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:54,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:54,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-06T10:16:54,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-06T10:16:54,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-06T10:16:54,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9070 sec 2024-12-06T10:16:54,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.9170 sec 2024-12-06T10:16:54,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:16:54,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:54,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:54,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:54,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:54,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:54,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:54,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:54,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/13c7b26f1c4d4c39b8ebda1b80b2c05e is 50, key is test_row_0/A:col10/1733480214250/Put/seqid=0 2024-12-06T10:16:54,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480274935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480274937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480274940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480274942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741863_1039 (size=14491) 2024-12-06T10:16:54,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/13c7b26f1c4d4c39b8ebda1b80b2c05e 2024-12-06T10:16:54,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:54,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480274948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:54,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T10:16:54,966 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-06T10:16:54,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:16:54,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-06T10:16:54,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:54,971 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:16:54,972 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:16:54,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:16:54,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/e2d9d9b845eb4e268e0e22b20edd1a8d is 50, key is test_row_0/B:col10/1733480214250/Put/seqid=0 2024-12-06T10:16:54,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741864_1040 (size=12101) 2024-12-06T10:16:54,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/e2d9d9b845eb4e268e0e22b20edd1a8d 
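The repeated RegionTooBusyException rejections above ("Over memstore limit=512.0 K") come from HRegion.checkResources refusing new mutations while the region's memstore sits above its blocking threshold, until the in-flight flush drains it. A minimal client-side sketch of how a writer could back off and retry such a put is shown below; the row, family, qualifier, and retry parameters are illustrative assumptions (and in practice the HBase client also retries these rejections internally before surfacing them).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" mirror the cells visible in the flush entries above.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100L;                       // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                          // rejected while the region is blocked
          return;                                  // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                 // wait for the memstore flush to drain
          backoffMs *= 2;                          // exponential backoff
        }
      }
      throw new IOException("put still rejected after 5 attempts");
    }
  }
}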
2024-12-06T10:16:55,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/b378b116280541d98a400b811fbf5c0f is 50, key is test_row_0/C:col10/1733480214250/Put/seqid=0 2024-12-06T10:16:55,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741865_1041 (size=12101) 2024-12-06T10:16:55,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480275046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480275047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480275049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480275049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480275059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:55,126 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:55,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:55,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
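The "Unable to complete flush ... as already flushing" errors that follow are the region server declining a master-dispatched FlushRegionCallable because a memstore flush for b58170106b3730174deb9625aeac23df is already running; the master then re-dispatches pid=17, as seen further down. The flush request itself originates from the client Admin API (the log shows "Operation: FLUSH, Table Name: default:TestAcidGuarantees"); a minimal sketch of issuing such a request, with only the table name taken from the log, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Stores a FlushTableProcedure on the master, which fans out
      // FlushRegionProcedure subprocedures to the owning region servers,
      // matching the FlushTableProcedure / FlushRegionProcedure entries in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}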
2024-12-06T10:16:55,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480275250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480275252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480275255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480275256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480275264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:55,283 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:55,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:55,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
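The 512.0 K figure in these rejections is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, enforced in HRegion.checkResources. The values this test actually uses are not visible in the excerpt; the snippet below is only a sketch of one configuration that would yield a 512 KB blocking limit (128 KB flush size with an assumed multiplier of 4).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitSketch {
  // Hypothetical helper: builds a configuration whose per-region blocking
  // limit works out to 512 KB, as reported in the exceptions above.
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Request a memstore flush once a region reaches ~128 KB (assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new updates at flush.size * multiplier = 4 * 128 KB = 512 KB
    // (the multiplier value is likewise an assumption).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}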
2024-12-06T10:16:55,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:55,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:55,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
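The failing pid=17 above is the master-driven flush procedure colliding with a flush the MemStoreFlusher already has in progress ("NOT flushing ... as already flushing"); the RSProcedureDispatcher simply redispatches it until the region is free. The same table-level flush can be requested from a client through the Admin API, roughly as in this sketch; the connection setup and table name are assumptions, not taken from this run.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; if a region is already
      // flushing, the underlying remote procedure fails and is retried, as logged above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}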
2024-12-06T10:16:55,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/b378b116280541d98a400b811fbf5c0f 2024-12-06T10:16:55,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/13c7b26f1c4d4c39b8ebda1b80b2c05e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/13c7b26f1c4d4c39b8ebda1b80b2c05e 2024-12-06T10:16:55,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/13c7b26f1c4d4c39b8ebda1b80b2c05e, entries=200, sequenceid=131, filesize=14.2 K 2024-12-06T10:16:55,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/e2d9d9b845eb4e268e0e22b20edd1a8d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e2d9d9b845eb4e268e0e22b20edd1a8d 2024-12-06T10:16:55,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e2d9d9b845eb4e268e0e22b20edd1a8d, entries=150, sequenceid=131, filesize=11.8 K 2024-12-06T10:16:55,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/b378b116280541d98a400b811fbf5c0f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b378b116280541d98a400b811fbf5c0f 2024-12-06T10:16:55,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b378b116280541d98a400b811fbf5c0f, entries=150, sequenceid=131, filesize=11.8 K 2024-12-06T10:16:55,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for b58170106b3730174deb9625aeac23df in 618ms, sequenceid=131, compaction requested=true 2024-12-06T10:16:55,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:55,518 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:55,520 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files 
of size 38699 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:55,520 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:16:55,520 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,521 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0fa2abd1dd764753b86c8980f3a423fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c20b4e29514d40cbbfbde11c803d5472, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/13c7b26f1c4d4c39b8ebda1b80b2c05e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=37.8 K 2024-12-06T10:16:55,521 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fa2abd1dd764753b86c8980f3a423fa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733480212268 2024-12-06T10:16:55,522 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c20b4e29514d40cbbfbde11c803d5472, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733480213096 2024-12-06T10:16:55,524 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13c7b26f1c4d4c39b8ebda1b80b2c05e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480214250 2024-12-06T10:16:55,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:55,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:55,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:55,534 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:55,534 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:16:55,534 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:55,534 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4c74bf0f893b48b381eea9651b1fde7a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d4558731ae29441b98c9af94346c0a30, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e2d9d9b845eb4e268e0e22b20edd1a8d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=35.5 K 2024-12-06T10:16:55,535 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c74bf0f893b48b381eea9651b1fde7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733480212268 2024-12-06T10:16:55,537 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d4558731ae29441b98c9af94346c0a30, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733480213096 2024-12-06T10:16:55,538 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e2d9d9b845eb4e268e0e22b20edd1a8d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480214250 2024-12-06T10:16:55,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:16:55,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:55,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:16:55,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:55,557 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#27 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:55,559 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/46a89faee0ab42e3a7fd35d499c0ec83 is 50, key is test_row_0/A:col10/1733480214250/Put/seqid=0 2024-12-06T10:16:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:55,567 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:55,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:55,568 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/44191a3bc0ce41f98b436888b6d1ef4a is 50, key is test_row_0/B:col10/1733480214250/Put/seqid=0 2024-12-06T10:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:55,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741866_1042 (size=12409) 2024-12-06T10:16:55,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480275576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480275578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480275580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480275581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480275584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,594 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/46a89faee0ab42e3a7fd35d499c0ec83 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/46a89faee0ab42e3a7fd35d499c0ec83 2024-12-06T10:16:55,597 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:55,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:55,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
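The 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, which in stock HBase is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a limit this small presumably comes from test-specific overrides. The sketch below shows how such a limit could be set programmatically; the exact values are assumptions chosen only so that 128 KB x 4 = 512 KB matches the logged limit, not the settings actually used by this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block new writes once it reaches 4x that, i.e. 512 KB, the point at
    // which HRegion.checkResources throws "Over memstore limit=512.0 K".
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1));
  }
}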
2024-12-06T10:16:55,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/14238d47ee5a4553959923f9040cc238 is 50, key is test_row_0/A:col10/1733480215563/Put/seqid=0 2024-12-06T10:16:55,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,606 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 46a89faee0ab42e3a7fd35d499c0ec83(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
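After the flushes above, each store has accumulated three HFiles, so the ExploringCompactionPolicy selects all three for a minor compaction and rewrites them into a single file of about 12 K. Compaction progress can also be observed, or a major compaction forced, from a client via the standard HBase 2.x Admin calls, roughly as below; the table name is an assumption.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStatus {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Reports NONE, MINOR, MAJOR or MAJOR_AND_MINOR, matching the compactions logged above.
      CompactionState state = admin.getCompactionState(table);
      System.out.println("compaction state: " + state);
      // Optionally force a major compaction that rewrites all store files per family.
      admin.majorCompact(table);
    }
  }
}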
2024-12-06T10:16:55,606 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:55,607 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480215518; duration=0sec 2024-12-06T10:16:55,607 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:55,607 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:16:55,608 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:55,612 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:55,613 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:16:55,613 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,613 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/43782ec9c51c477a8cb7b11e6a39250e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/2faf0738048046d4b38f7841088e19df, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b378b116280541d98a400b811fbf5c0f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=35.5 K 2024-12-06T10:16:55,614 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43782ec9c51c477a8cb7b11e6a39250e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733480212268 2024-12-06T10:16:55,614 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2faf0738048046d4b38f7841088e19df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733480213096 2024-12-06T10:16:55,615 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b378b116280541d98a400b811fbf5c0f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480214250 2024-12-06T10:16:55,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37093 is added to blk_1073741867_1043 (size=12409) 2024-12-06T10:16:55,632 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:55,633 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e66f016dcf174196b717df2d1a2fa0af is 50, key is test_row_0/C:col10/1733480214250/Put/seqid=0 2024-12-06T10:16:55,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741868_1044 (size=16931) 2024-12-06T10:16:55,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/14238d47ee5a4553959923f9040cc238 2024-12-06T10:16:55,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741869_1045 (size=12409) 2024-12-06T10:16:55,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/67678503e1dd4dc7be58d3a3eec00817 is 50, key is test_row_0/B:col10/1733480215563/Put/seqid=0 2024-12-06T10:16:55,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480275687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480275687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480275689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480275689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,698 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e66f016dcf174196b717df2d1a2fa0af as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e66f016dcf174196b717df2d1a2fa0af 2024-12-06T10:16:55,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480275699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741870_1046 (size=12151) 2024-12-06T10:16:55,713 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into e66f016dcf174196b717df2d1a2fa0af(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:16:55,713 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:55,713 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480215544; duration=0sec 2024-12-06T10:16:55,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/67678503e1dd4dc7be58d3a3eec00817 2024-12-06T10:16:55,713 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:55,713 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:16:55,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/449bf2020d294fe9a087bf1e8d07b25b is 50, key is test_row_0/C:col10/1733480215563/Put/seqid=0 2024-12-06T10:16:55,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:55,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:55,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741871_1047 (size=12151) 2024-12-06T10:16:55,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/449bf2020d294fe9a087bf1e8d07b25b 2024-12-06T10:16:55,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/14238d47ee5a4553959923f9040cc238 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/14238d47ee5a4553959923f9040cc238 2024-12-06T10:16:55,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/14238d47ee5a4553959923f9040cc238, entries=250, sequenceid=157, filesize=16.5 K 2024-12-06T10:16:55,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/67678503e1dd4dc7be58d3a3eec00817 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/67678503e1dd4dc7be58d3a3eec00817 2024-12-06T10:16:55,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/67678503e1dd4dc7be58d3a3eec00817, entries=150, sequenceid=157, filesize=11.9 K 2024-12-06T10:16:55,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/449bf2020d294fe9a087bf1e8d07b25b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/449bf2020d294fe9a087bf1e8d07b25b 2024-12-06T10:16:55,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/449bf2020d294fe9a087bf1e8d07b25b, entries=150, sequenceid=157, filesize=11.9 K 2024-12-06T10:16:55,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b58170106b3730174deb9625aeac23df in 250ms, sequenceid=157, compaction requested=false 2024-12-06T10:16:55,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:55,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:55,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:16:55,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:55,910 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/da3e59c7dee24d62ac5b2e0b43047335 is 50, key is test_row_1/A:col10/1733480215895/Put/seqid=0 2024-12-06T10:16:55,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:55,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
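The block above is one round of a pattern that repeats throughout this run: the master dispatches the region flush for pid=17, the region server answers "NOT flushing ... as already flushing" because MemStoreFlusher.0 is still writing out the previous snapshot, FlushRegionCallable fails with "Unable to complete flush", and the master re-queues the procedure until the region goes idle. As a rough illustration of the client-side call that exercises this path, here is a minimal sketch, assuming a reachable cluster with default configuration and the table name seen in the log; the manual retry loop is purely illustrative, since the procedure framework itself keeps retrying, as the repeated pid=17 entries show:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    import java.io.IOException;

    public class FlushWithRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Admin.flush(TableName) asks the master to flush the table's regions; in this
                // build the request is handed to the region server as a remote procedure, which
                // is what the FlushRegionCallable / pid=17 entries above correspond to.
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        admin.flush(table);
                        break;
                    } catch (IOException e) {
                        // Illustrative backoff only; the master-side procedure retries on its own.
                        Thread.sleep(200L * (attempt + 1));
                    }
                }
            }
        }
    }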
2024-12-06T10:16:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:55,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480275944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480275945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480275945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480275947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480275948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:55,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741872_1048 (size=14537) 2024-12-06T10:16:55,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/da3e59c7dee24d62ac5b2e0b43047335 2024-12-06T10:16:55,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/35988e34183c474c9b7a7604d14c22a4 is 50, key is test_row_1/B:col10/1733480215895/Put/seqid=0 2024-12-06T10:16:56,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741873_1049 (size=9757) 2024-12-06T10:16:56,037 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/44191a3bc0ce41f98b436888b6d1ef4a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/44191a3bc0ce41f98b436888b6d1ef4a 2024-12-06T10:16:56,047 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 44191a3bc0ce41f98b436888b6d1ef4a(size=12.1 K), total 
size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:56,048 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:56,048 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480215530; duration=0sec 2024-12-06T10:16:56,048 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:56,048 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:16:56,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480276051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480276052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480276052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480276053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480276053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:56,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:56,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:16:56,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:56,221 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:56,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:56,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480276257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480276261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480276261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480276261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480276261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,379 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:56,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:56,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:56,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/35988e34183c474c9b7a7604d14c22a4 2024-12-06T10:16:56,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/67ccf3b07fb24b25b9526bf590a1e31d is 50, key is test_row_1/C:col10/1733480215895/Put/seqid=0 2024-12-06T10:16:56,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741874_1050 (size=9757) 2024-12-06T10:16:56,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/67ccf3b07fb24b25b9526bf590a1e31d 2024-12-06T10:16:56,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/da3e59c7dee24d62ac5b2e0b43047335 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/da3e59c7dee24d62ac5b2e0b43047335 2024-12-06T10:16:56,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/da3e59c7dee24d62ac5b2e0b43047335, entries=200, sequenceid=172, filesize=14.2 K 2024-12-06T10:16:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/35988e34183c474c9b7a7604d14c22a4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35988e34183c474c9b7a7604d14c22a4 2024-12-06T10:16:56,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35988e34183c474c9b7a7604d14c22a4, entries=100, sequenceid=172, filesize=9.5 K 
2024-12-06T10:16:56,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/67ccf3b07fb24b25b9526bf590a1e31d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/67ccf3b07fb24b25b9526bf590a1e31d 2024-12-06T10:16:56,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/67ccf3b07fb24b25b9526bf590a1e31d, entries=100, sequenceid=172, filesize=9.5 K 2024-12-06T10:16:56,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for b58170106b3730174deb9625aeac23df in 637ms, sequenceid=172, compaction requested=true 2024-12-06T10:16:56,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:56,534 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:56,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,536 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43877 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:56,536 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:16:56,536 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:56,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:16:56,536 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/46a89faee0ab42e3a7fd35d499c0ec83, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/14238d47ee5a4553959923f9040cc238, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/da3e59c7dee24d62ac5b2e0b43047335] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=42.8 K 2024-12-06T10:16:56,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:56,537 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:16:56,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:56,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:56,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:56,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:56,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:56,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:56,538 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46a89faee0ab42e3a7fd35d499c0ec83, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480214250 2024-12-06T10:16:56,538 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14238d47ee5a4553959923f9040cc238, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733480214935 2024-12-06T10:16:56,539 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
da3e59c7dee24d62ac5b2e0b43047335, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480215579 2024-12-06T10:16:56,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:56,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:56,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:16:56,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:56,540 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:56,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:16:56,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:56,544 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34317 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:56,545 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:16:56,545 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:56,545 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/44191a3bc0ce41f98b436888b6d1ef4a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/67678503e1dd4dc7be58d3a3eec00817, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35988e34183c474c9b7a7604d14c22a4] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=33.5 K 2024-12-06T10:16:56,546 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 44191a3bc0ce41f98b436888b6d1ef4a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480214250 2024-12-06T10:16:56,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 67678503e1dd4dc7be58d3a3eec00817, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733480214935 2024-12-06T10:16:56,549 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 35988e34183c474c9b7a7604d14c22a4, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480215895 2024-12-06T10:16:56,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/901738c6447346059385ada39f9e9a90 is 50, key is test_row_0/A:col10/1733480215935/Put/seqid=0 2024-12-06T10:16:56,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:56,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:56,570 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#37 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:56,571 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/7f1ae794621149efbc464f6c7e29b5f7 is 50, key is test_row_0/A:col10/1733480215563/Put/seqid=0 2024-12-06T10:16:56,586 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#38 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:56,587 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/f59976c62129486982cf5127381d7e8e is 50, key is test_row_0/B:col10/1733480215563/Put/seqid=0 2024-12-06T10:16:56,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480276579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480276581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480276585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480276588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480276589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741875_1051 (size=12151) 2024-12-06T10:16:56,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741876_1052 (size=12561) 2024-12-06T10:16:56,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480276692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480276695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480276705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480276705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480276705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,719 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/7f1ae794621149efbc464f6c7e29b5f7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/7f1ae794621149efbc464f6c7e29b5f7 2024-12-06T10:16:56,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741877_1053 (size=12561) 2024-12-06T10:16:56,732 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 7f1ae794621149efbc464f6c7e29b5f7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:16:56,733 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:56,733 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480216534; duration=0sec 2024-12-06T10:16:56,733 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:56,733 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/f59976c62129486982cf5127381d7e8e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f59976c62129486982cf5127381d7e8e 2024-12-06T10:16:56,734 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:16:56,734 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:56,736 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34317 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:56,737 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:16:56,737 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:16:56,737 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e66f016dcf174196b717df2d1a2fa0af, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/449bf2020d294fe9a087bf1e8d07b25b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/67ccf3b07fb24b25b9526bf590a1e31d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=33.5 K 2024-12-06T10:16:56,738 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e66f016dcf174196b717df2d1a2fa0af, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480214250 2024-12-06T10:16:56,739 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 449bf2020d294fe9a087bf1e8d07b25b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733480214935 2024-12-06T10:16:56,741 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67ccf3b07fb24b25b9526bf590a1e31d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480215895 2024-12-06T10:16:56,742 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into f59976c62129486982cf5127381d7e8e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:56,742 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:56,743 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480216540; duration=0sec 2024-12-06T10:16:56,743 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:56,743 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:16:56,768 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#39 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:56,769 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/cfed5a589e664bd796fb5f031cdfd632 is 50, key is test_row_0/C:col10/1733480215563/Put/seqid=0 2024-12-06T10:16:56,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741878_1054 (size=12561) 2024-12-06T10:16:56,816 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/cfed5a589e664bd796fb5f031cdfd632 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/cfed5a589e664bd796fb5f031cdfd632 2024-12-06T10:16:56,827 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into cfed5a589e664bd796fb5f031cdfd632(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:56,827 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:56,828 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480216540; duration=0sec 2024-12-06T10:16:56,828 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:56,828 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:16:56,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480276897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480276900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480276910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480276909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:56,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:56,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480276911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,040 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/901738c6447346059385ada39f9e9a90 2024-12-06T10:16:57,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/fe567e62b4a5471a98ca002af5cf538a is 50, key is test_row_0/B:col10/1733480215935/Put/seqid=0 2024-12-06T10:16:57,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741879_1055 (size=12151) 2024-12-06T10:16:57,077 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/fe567e62b4a5471a98ca002af5cf538a 2024-12-06T10:16:57,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:57,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5d2a62038cf046b887758630681c0842 is 50, key is test_row_0/C:col10/1733480215935/Put/seqid=0 2024-12-06T10:16:57,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741880_1056 (size=12151) 2024-12-06T10:16:57,106 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5d2a62038cf046b887758630681c0842 2024-12-06T10:16:57,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/901738c6447346059385ada39f9e9a90 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/901738c6447346059385ada39f9e9a90 2024-12-06T10:16:57,124 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/901738c6447346059385ada39f9e9a90, entries=150, sequenceid=197, filesize=11.9 K 2024-12-06T10:16:57,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/fe567e62b4a5471a98ca002af5cf538a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fe567e62b4a5471a98ca002af5cf538a 2024-12-06T10:16:57,135 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fe567e62b4a5471a98ca002af5cf538a, entries=150, sequenceid=197, filesize=11.9 K 2024-12-06T10:16:57,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5d2a62038cf046b887758630681c0842 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5d2a62038cf046b887758630681c0842 2024-12-06T10:16:57,147 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5d2a62038cf046b887758630681c0842, entries=150, sequenceid=197, filesize=11.9 K 2024-12-06T10:16:57,154 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b58170106b3730174deb9625aeac23df in 616ms, sequenceid=197, compaction requested=false 2024-12-06T10:16:57,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:57,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:57,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-06T10:16:57,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-06T10:16:57,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-06T10:16:57,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1830 sec 2024-12-06T10:16:57,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.1920 sec 2024-12-06T10:16:57,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T10:16:57,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:57,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:57,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:57,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:57,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:57,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:57,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:57,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0342b1d298d0456580b390e739ff80a9 is 50, key is test_row_0/A:col10/1733480216579/Put/seqid=0 2024-12-06T10:16:57,231 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741881_1057 (size=14541) 2024-12-06T10:16:57,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480277245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480277245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480277249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480277246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480277249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480277354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480277356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480277357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480277357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480277354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480277561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480277558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480277563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480277564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480277564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0342b1d298d0456580b390e739ff80a9 2024-12-06T10:16:57,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/bdc576705f1845c4afb4f0ab9c3ece49 is 50, key is test_row_0/B:col10/1733480216579/Put/seqid=0 2024-12-06T10:16:57,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741882_1058 (size=12151) 2024-12-06T10:16:57,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480277864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480277864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480277869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480277869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:57,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480277872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:58,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/bdc576705f1845c4afb4f0ab9c3ece49 2024-12-06T10:16:58,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/f8850add885e4ed388ecd29cacf0a076 is 50, key is test_row_0/C:col10/1733480216579/Put/seqid=0 2024-12-06T10:16:58,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741883_1059 (size=12151) 2024-12-06T10:16:58,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:58,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480278369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:58,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480278371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480278371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:58,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:58,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480278372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:58,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:58,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480278379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:58,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/f8850add885e4ed388ecd29cacf0a076 2024-12-06T10:16:58,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0342b1d298d0456580b390e739ff80a9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0342b1d298d0456580b390e739ff80a9 2024-12-06T10:16:58,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0342b1d298d0456580b390e739ff80a9, entries=200, sequenceid=213, filesize=14.2 K 2024-12-06T10:16:58,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/bdc576705f1845c4afb4f0ab9c3ece49 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/bdc576705f1845c4afb4f0ab9c3ece49 2024-12-06T10:16:58,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/bdc576705f1845c4afb4f0ab9c3ece49, entries=150, sequenceid=213, filesize=11.9 K 2024-12-06T10:16:58,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/f8850add885e4ed388ecd29cacf0a076 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f8850add885e4ed388ecd29cacf0a076 2024-12-06T10:16:58,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f8850add885e4ed388ecd29cacf0a076, entries=150, sequenceid=213, filesize=11.9 K 2024-12-06T10:16:58,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b58170106b3730174deb9625aeac23df in 1379ms, sequenceid=213, compaction requested=true 2024-12-06T10:16:58,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:58,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:58,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:58,586 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:58,586 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:58,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:16:58,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:58,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:16:58,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:58,588 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:58,588 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:58,588 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:16:58,588 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:16:58,588 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:58,588 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:16:58,589 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f59976c62129486982cf5127381d7e8e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fe567e62b4a5471a98ca002af5cf538a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/bdc576705f1845c4afb4f0ab9c3ece49] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.0 K 2024-12-06T10:16:58,589 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/7f1ae794621149efbc464f6c7e29b5f7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/901738c6447346059385ada39f9e9a90, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0342b1d298d0456580b390e739ff80a9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=38.3 K 2024-12-06T10:16:58,589 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f59976c62129486982cf5127381d7e8e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480215563 2024-12-06T10:16:58,589 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f1ae794621149efbc464f6c7e29b5f7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480215563 2024-12-06T10:16:58,590 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting fe567e62b4a5471a98ca002af5cf538a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733480215935 2024-12-06T10:16:58,590 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 901738c6447346059385ada39f9e9a90, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733480215935 2024-12-06T10:16:58,590 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting bdc576705f1845c4afb4f0ab9c3ece49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480216579 2024-12-06T10:16:58,591 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0342b1d298d0456580b390e739ff80a9, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480216577 2024-12-06T10:16:58,615 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#45 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:58,616 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#46 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:58,616 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/d7115ea069a04e28a8a3e85c3c9b8d5d is 50, key is test_row_0/B:col10/1733480216579/Put/seqid=0 2024-12-06T10:16:58,617 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6022b549f2454721afd9204e1adae00f is 50, key is test_row_0/A:col10/1733480216579/Put/seqid=0 2024-12-06T10:16:58,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741884_1060 (size=12663) 2024-12-06T10:16:58,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741885_1061 (size=12663) 2024-12-06T10:16:58,662 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/d7115ea069a04e28a8a3e85c3c9b8d5d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d7115ea069a04e28a8a3e85c3c9b8d5d 2024-12-06T10:16:58,668 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6022b549f2454721afd9204e1adae00f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6022b549f2454721afd9204e1adae00f 2024-12-06T10:16:58,679 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into d7115ea069a04e28a8a3e85c3c9b8d5d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:58,679 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 6022b549f2454721afd9204e1adae00f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:58,679 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:58,679 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:58,679 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480218586; duration=0sec 2024-12-06T10:16:58,679 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480218586; duration=0sec 2024-12-06T10:16:58,679 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:16:58,679 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:16:58,679 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:58,680 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:58,680 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:16:58,681 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:58,681 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:16:58,681 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
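The flush and compaction records above keep referencing a few numbers: writes rejected once the region's memstore passes its 512.0 K blocking limit, minor compactions selected as soon as three store files are eligible, and a blocking-store-file ceiling of 16. Those figures correspond to standard HBase configuration keys. The Java sketch below only illustrates which keys are involved; the concrete values are assumptions chosen to mirror this test run, not the settings the test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionTuning {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();

        // Memstore size at which a region flushes to a new HFile.
        // (Hypothetical value; the 512.0 K blocking limit reported in the
        // log is flush size * block multiplier, e.g. 128 KB * 4.)
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are rejected with RegionTooBusyException once the memstore
        // reaches flush size times this multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        // Minimum number of store files before a minor compaction is
        // considered -- matches the "3 eligible" selections in the log.
        conf.setInt("hbase.hstore.compactionThreshold", 3);

        // Store file count at which further flushes are blocked -- the
        // "16 blocking" figure reported by SortedCompactionPolicy.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        return conf;
    }
}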
2024-12-06T10:16:58,681 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/cfed5a589e664bd796fb5f031cdfd632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5d2a62038cf046b887758630681c0842, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f8850add885e4ed388ecd29cacf0a076] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.0 K 2024-12-06T10:16:58,682 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfed5a589e664bd796fb5f031cdfd632, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480215563 2024-12-06T10:16:58,683 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d2a62038cf046b887758630681c0842, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733480215935 2024-12-06T10:16:58,684 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8850add885e4ed388ecd29cacf0a076, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480216579 2024-12-06T10:16:58,717 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#47 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:58,718 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/99e41068e3a049d3b357a4f5ea0c3083 is 50, key is test_row_0/C:col10/1733480216579/Put/seqid=0 2024-12-06T10:16:58,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741886_1062 (size=12663) 2024-12-06T10:16:58,752 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/99e41068e3a049d3b357a4f5ea0c3083 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/99e41068e3a049d3b357a4f5ea0c3083 2024-12-06T10:16:58,761 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 99e41068e3a049d3b357a4f5ea0c3083(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
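The three minor compactions above (families A, B and C, each merging three HFiles into one ~12.4 K file) were queued automatically by MemStoreFlusher.0. The same work can also be requested and observed through the public Admin API; the sketch below is a hypothetical example that reuses the table name from this log, with the connection setup and polling interval as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {

            // Ask the region servers to compact every store of the table;
            // the request is asynchronous, like the queued compactions above.
            admin.compact(table);

            // Poll the aggregate compaction state until the queues drain.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);
            }
        }
    }
}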
2024-12-06T10:16:58,764 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:16:58,764 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480218587; duration=0sec 2024-12-06T10:16:58,764 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:58,764 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:16:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:16:59,081 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-06T10:16:59,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:16:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-06T10:16:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:16:59,086 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:16:59,087 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:16:59,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:16:59,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:16:59,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-06T10:16:59,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
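The records above show the server side of a client-initiated flush: HMaster receives "flush TestAcidGuarantees" from the jenkins client, stores FlushTableProcedure pid=18, and dispatches a FlushRegionProcedure (pid=19) to the region server, while ordinary Mutate calls are rejected with RegionTooBusyException until memstore space is freed. A minimal, hypothetical client-side counterpart is sketched below; the row key, family and qualifier mirror the test_row_0/A:col10 cells seen in the log, and the value is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndWrite {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestAcidGuarantees");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin();
             Table table = connection.getTable(name)) {

            // Write a cell; if the region is over its memstore blocking limit
            // the server rejects the put with RegionTooBusyException, which
            // the client normally retries with backoff (bounded by
            // hbase.client.retries.number).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),
                Bytes.toBytes("value"));
            table.put(put);

            // Explicitly flush the table's memstores -- the client call that
            // the FlushTableProcedure in the log is servicing.
            admin.flush(name);
        }
    }
}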
2024-12-06T10:16:59,242 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:16:59,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:16:59,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:59,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:16:59,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:59,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:16:59,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:16:59,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6db240beb05749b7905d387c074d6d66 is 50, key is test_row_0/A:col10/1733480217244/Put/seqid=0 2024-12-06T10:16:59,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741887_1063 (size=12151) 2024-12-06T10:16:59,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:16:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:16:59,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:16:59,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480279397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480279397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480279399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480279399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480279399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480279503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480279503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480279505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480279505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480279504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,674 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6db240beb05749b7905d387c074d6d66 2024-12-06T10:16:59,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/2203c0b90d6a451586516dcf8ce40eca is 50, key is test_row_0/B:col10/1733480217244/Put/seqid=0 2024-12-06T10:16:59,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:16:59,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480279710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480279711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480279711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480279709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:16:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480279712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:16:59,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741888_1064 (size=12151) 2024-12-06T10:17:00,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480280015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480280017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480280018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480280018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480280019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,117 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/2203c0b90d6a451586516dcf8ce40eca 2024-12-06T10:17:00,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/0caa2f09f972442dbea12f31d530d59c is 50, key is test_row_0/C:col10/1733480217244/Put/seqid=0 2024-12-06T10:17:00,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741889_1065 (size=12151) 2024-12-06T10:17:00,140 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/0caa2f09f972442dbea12f31d530d59c 2024-12-06T10:17:00,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6db240beb05749b7905d387c074d6d66 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6db240beb05749b7905d387c074d6d66 2024-12-06T10:17:00,158 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6db240beb05749b7905d387c074d6d66, entries=150, sequenceid=239, filesize=11.9 K 2024-12-06T10:17:00,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/2203c0b90d6a451586516dcf8ce40eca as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2203c0b90d6a451586516dcf8ce40eca 2024-12-06T10:17:00,168 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2203c0b90d6a451586516dcf8ce40eca, entries=150, sequenceid=239, filesize=11.9 K 2024-12-06T10:17:00,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/0caa2f09f972442dbea12f31d530d59c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/0caa2f09f972442dbea12f31d530d59c 2024-12-06T10:17:00,188 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/0caa2f09f972442dbea12f31d530d59c, entries=150, sequenceid=239, filesize=11.9 K 2024-12-06T10:17:00,189 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for b58170106b3730174deb9625aeac23df in 947ms, sequenceid=239, compaction requested=false 2024-12-06T10:17:00,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:00,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:17:00,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-06T10:17:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-06T10:17:00,196 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-06T10:17:00,196 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1060 sec 2024-12-06T10:17:00,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.1140 sec 2024-12-06T10:17:00,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:00,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:00,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:00,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:00,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:00,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:00,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:00,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/bc0e1caccf7b49afa8ad2a743616c193 is 50, key is test_row_0/A:col10/1733480220522/Put/seqid=0 2024-12-06T10:17:00,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741890_1066 (size=14541) 2024-12-06T10:17:00,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480280547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480280550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480280552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480280548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480280553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480280658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480280659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480280660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480280661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480280659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480280862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480280862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480280864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480280864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480280866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:00,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/bc0e1caccf7b49afa8ad2a743616c193 2024-12-06T10:17:00,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/98ef89a372d64938a0ac4fbd1a069481 is 50, key is test_row_0/B:col10/1733480220522/Put/seqid=0 2024-12-06T10:17:00,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741891_1067 (size=12151) 2024-12-06T10:17:01,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/98ef89a372d64938a0ac4fbd1a069481 2024-12-06T10:17:01,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3fd16b652d0d4e9da6bede37c15d6398 is 50, key is test_row_0/C:col10/1733480220522/Put/seqid=0 2024-12-06T10:17:01,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741892_1068 (size=12151) 2024-12-06T10:17:01,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3fd16b652d0d4e9da6bede37c15d6398 2024-12-06T10:17:01,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/bc0e1caccf7b49afa8ad2a743616c193 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/bc0e1caccf7b49afa8ad2a743616c193 2024-12-06T10:17:01,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/bc0e1caccf7b49afa8ad2a743616c193, entries=200, sequenceid=256, filesize=14.2 K 2024-12-06T10:17:01,070 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-06T10:17:01,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/98ef89a372d64938a0ac4fbd1a069481 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/98ef89a372d64938a0ac4fbd1a069481 2024-12-06T10:17:01,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/98ef89a372d64938a0ac4fbd1a069481, entries=150, sequenceid=256, filesize=11.9 K 2024-12-06T10:17:01,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3fd16b652d0d4e9da6bede37c15d6398 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3fd16b652d0d4e9da6bede37c15d6398 2024-12-06T10:17:01,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3fd16b652d0d4e9da6bede37c15d6398, entries=150, sequenceid=256, filesize=11.9 K 2024-12-06T10:17:01,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for b58170106b3730174deb9625aeac23df in 570ms, sequenceid=256, compaction requested=true 2024-12-06T10:17:01,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:01,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:01,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:01,094 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:01,094 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:01,094 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:01,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:01,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:01,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:01,096 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:01,096 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:01,096 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:01,096 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:01,096 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,096 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:01,096 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6022b549f2454721afd9204e1adae00f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6db240beb05749b7905d387c074d6d66, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/bc0e1caccf7b49afa8ad2a743616c193] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=38.4 K 2024-12-06T10:17:01,096 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d7115ea069a04e28a8a3e85c3c9b8d5d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2203c0b90d6a451586516dcf8ce40eca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/98ef89a372d64938a0ac4fbd1a069481] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.1 K 2024-12-06T10:17:01,097 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d7115ea069a04e28a8a3e85c3c9b8d5d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480216579 2024-12-06T10:17:01,097 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 2203c0b90d6a451586516dcf8ce40eca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733480217244 2024-12-06T10:17:01,098 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 98ef89a372d64938a0ac4fbd1a069481, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733480220520 2024-12-06T10:17:01,098 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6022b549f2454721afd9204e1adae00f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480216579 2024-12-06T10:17:01,098 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6db240beb05749b7905d387c074d6d66, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733480217244 2024-12-06T10:17:01,100 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc0e1caccf7b49afa8ad2a743616c193, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733480219396 2024-12-06T10:17:01,113 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#54 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:01,114 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/250670863cb64b3db3b22dba6ac0e464 is 50, key is test_row_0/B:col10/1733480220522/Put/seqid=0 2024-12-06T10:17:01,117 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#55 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:01,118 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/32362215a25f4a30a943f96d2556b778 is 50, key is test_row_0/A:col10/1733480220522/Put/seqid=0 2024-12-06T10:17:01,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741893_1069 (size=12765) 2024-12-06T10:17:01,141 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/250670863cb64b3db3b22dba6ac0e464 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/250670863cb64b3db3b22dba6ac0e464 2024-12-06T10:17:01,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741894_1070 (size=12765) 2024-12-06T10:17:01,152 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 250670863cb64b3db3b22dba6ac0e464(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:01,152 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/32362215a25f4a30a943f96d2556b778 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/32362215a25f4a30a943f96d2556b778 2024-12-06T10:17:01,152 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:01,152 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480221094; duration=0sec 2024-12-06T10:17:01,152 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:01,153 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:01,153 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:01,155 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:01,155 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:01,156 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:01,156 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/99e41068e3a049d3b357a4f5ea0c3083, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/0caa2f09f972442dbea12f31d530d59c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3fd16b652d0d4e9da6bede37c15d6398] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.1 K 2024-12-06T10:17:01,158 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 99e41068e3a049d3b357a4f5ea0c3083, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480216579 2024-12-06T10:17:01,159 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0caa2f09f972442dbea12f31d530d59c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733480217244 2024-12-06T10:17:01,160 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 32362215a25f4a30a943f96d2556b778(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:01,160 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:01,160 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480221093; duration=0sec 2024-12-06T10:17:01,160 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:01,161 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:01,161 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fd16b652d0d4e9da6bede37c15d6398, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733480220520 2024-12-06T10:17:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:01,172 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#56 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:01,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T10:17:01,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:01,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:01,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:01,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:01,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:01,172 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3b3fe211d914436bb3657b4e5b9abf41 is 50, key is test_row_0/C:col10/1733480220522/Put/seqid=0 2024-12-06T10:17:01,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:01,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9f93097ac9394f4e83ca0f0c2c8de661 is 50, key is test_row_0/A:col10/1733480221168/Put/seqid=0 2024-12-06T10:17:01,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741895_1071 (size=12765) 2024-12-06T10:17:01,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:17:01,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480281186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480281189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480281191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,196 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-06T10:17:01,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480281192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,198 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:01,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-06T10:17:01,201 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:01,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480281196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T10:17:01,202 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3b3fe211d914436bb3657b4e5b9abf41 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3b3fe211d914436bb3657b4e5b9abf41 2024-12-06T10:17:01,202 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:01,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:01,211 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 3b3fe211d914436bb3657b4e5b9abf41(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
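[editor's note] The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") show the region rejecting new mutations because its memstore has grown past its blocking size while the flush is still draining. In a stock configuration that blocking size is, as far as I know, the memstore flush size multiplied by a block multiplier; this test evidently runs with a very small flush size, which is how the limit ends up at 512 KB. A minimal sketch of reading the two settings and deriving the blocking size is below; the key names are standard HBase keys, but the defaults shown are assumptions to be checked against the running version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed defaults: 128 MB flush size, multiplier 4. The run behind this log clearly
    // overrides the flush size; e.g. 128 KB * 4 would give the 512 KB limit in the exceptions.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("writes block once a region's memstore exceeds ~" + (flushSize * multiplier) + " bytes");
  }
}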
2024-12-06T10:17:01,211 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:01,211 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480221094; duration=0sec 2024-12-06T10:17:01,211 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:01,211 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:01,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741896_1072 (size=14741) 2024-12-06T10:17:01,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9f93097ac9394f4e83ca0f0c2c8de661 2024-12-06T10:17:01,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/2f8f489fc12e476fbc2ff7ad4184cb69 is 50, key is test_row_0/B:col10/1733480221168/Put/seqid=0 2024-12-06T10:17:01,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741897_1073 (size=12301) 2024-12-06T10:17:01,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/2f8f489fc12e476fbc2ff7ad4184cb69 2024-12-06T10:17:01,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480281294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480281297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480281298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480281298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T10:17:01,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/22d53536dd1d4f38802c010ded8adf15 is 50, key is test_row_0/C:col10/1733480221168/Put/seqid=0 2024-12-06T10:17:01,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480281303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741898_1074 (size=12301) 2024-12-06T10:17:01,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-06T10:17:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:01,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
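[editor's note] From the client's side, the RegionTooBusyException calls above are retriable: the writer threads in this test keep re-issuing their mutations until the flush empties the memstore. A minimal, hypothetical retry loop for a single Put is sketched below; the row, family and qualifier are copied from the cell keys in the log, the backoff numbers are illustrative, and whether the exception surfaces directly or wrapped by the client's own retry machinery depends on client configuration (an assumption here). Only the HBase client classes themselves (ConnectionFactory, Table, Put, RegionTooBusyException) are taken as given.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50; // illustrative starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException busy) {
          // Memstore above its blocking limit; wait for the flush to catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 2000);
        }
      }
    }
  }
}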
2024-12-06T10:17:01,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480281498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480281500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480281501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T10:17:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480281505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,510 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480281508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-06T10:17:01,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:01,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:01,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-06T10:17:01,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:01,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:01,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
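[editor's note] The pid=20/pid=21 entries above are the master-side FlushTableProcedure repeatedly dispatching a FlushRegionCallable that the region server rejects with "NOT flushing ... as already flushing"; the master simply resubmits the subprocedure until the in-progress flush completes. Client code typically requests such a flush through the Admin API, roughly as sketched below; Admin.flush(TableName) is the real entry point, the surrounding setup is boilerplate, and how long the call blocks depends on the client version.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table (the FlushTableProcedure in the log)
      // and waits for the procedure to report completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}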
2024-12-06T10:17:01,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/22d53536dd1d4f38802c010ded8adf15
2024-12-06T10:17:01,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9f93097ac9394f4e83ca0f0c2c8de661 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9f93097ac9394f4e83ca0f0c2c8de661
2024-12-06T10:17:01,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9f93097ac9394f4e83ca0f0c2c8de661, entries=200, sequenceid=278, filesize=14.4 K
2024-12-06T10:17:01,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/2f8f489fc12e476fbc2ff7ad4184cb69 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2f8f489fc12e476fbc2ff7ad4184cb69
2024-12-06T10:17:01,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2f8f489fc12e476fbc2ff7ad4184cb69, entries=150, sequenceid=278, filesize=12.0 K
2024-12-06T10:17:01,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/22d53536dd1d4f38802c010ded8adf15 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/22d53536dd1d4f38802c010ded8adf15
2024-12-06T10:17:01,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/22d53536dd1d4f38802c010ded8adf15, entries=150, sequenceid=278, filesize=12.0 K
2024-12-06T10:17:01,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for b58170106b3730174deb9625aeac23df in 593ms, sequenceid=278, compaction requested=false
2024-12-06T10:17:01,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df:
2024-12-06T10:17:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-06T10:17:01,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df
2024-12-06T10:17:01,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-12-06T10:17:01,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A
2024-12-06T10:17:01,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:01,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B
2024-12-06T10:17:01,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:01,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C
2024-12-06T10:17:01,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:01,820 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:01,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-06T10:17:01,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:01,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:17:01,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:01,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:17:01,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/cb088467176847109c0605b1d39d469c is 50, key is test_row_0/A:col10/1733480221807/Put/seqid=0 2024-12-06T10:17:01,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:01,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480281834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480281836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480281837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480281838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480281839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741899_1075 (size=14741) 2024-12-06T10:17:01,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/cb088467176847109c0605b1d39d469c 2024-12-06T10:17:01,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/cb41f0ef1ffb4a28b396a7297574694e is 50, key is test_row_0/B:col10/1733480221807/Put/seqid=0 2024-12-06T10:17:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741900_1076 (size=12301) 2024-12-06T10:17:01,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/cb41f0ef1ffb4a28b396a7297574694e 2024-12-06T10:17:01,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/149615e2a4e848e0b97e6efac21429d2 is 50, key is test_row_0/C:col10/1733480221807/Put/seqid=0 2024-12-06T10:17:01,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741901_1077 (size=12301) 2024-12-06T10:17:01,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480281942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480281945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480281946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480281946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:01,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:17:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480281946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:17:01,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/149615e2a4e848e0b97e6efac21429d2
2024-12-06T10:17:01,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/cb088467176847109c0605b1d39d469c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/cb088467176847109c0605b1d39d469c
2024-12-06T10:17:01,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/cb088467176847109c0605b1d39d469c, entries=200, sequenceid=297, filesize=14.4 K
2024-12-06T10:17:01,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/cb41f0ef1ffb4a28b396a7297574694e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cb41f0ef1ffb4a28b396a7297574694e
2024-12-06T10:17:01,974 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:01,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-06T10:17:01,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:01,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing
2024-12-06T10:17:01,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:01,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:17:01,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21
java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:17:01,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=21
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:17:01,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cb41f0ef1ffb4a28b396a7297574694e, entries=150, sequenceid=297, filesize=12.0 K
2024-12-06T10:17:01,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/149615e2a4e848e0b97e6efac21429d2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/149615e2a4e848e0b97e6efac21429d2
2024-12-06T10:17:01,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/149615e2a4e848e0b97e6efac21429d2, entries=150, sequenceid=297, filesize=12.0 K
2024-12-06T10:17:01,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for b58170106b3730174deb9625aeac23df in 183ms, sequenceid=297, compaction requested=true
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df:
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:17:01,991 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:17:01,991 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3
2024-12-06T10:17:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-06T10:17:01,993 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-06T10:17:01,993 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files)
2024-12-06T10:17:01,993 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:01,993 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/250670863cb64b3db3b22dba6ac0e464, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2f8f489fc12e476fbc2ff7ad4184cb69, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cb41f0ef1ffb4a28b396a7297574694e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.5 K
2024-12-06T10:17:01,993 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42247 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-06T10:17:01,993 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files)
2024-12-06T10:17:01,994 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:01,994 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/32362215a25f4a30a943f96d2556b778, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9f93097ac9394f4e83ca0f0c2c8de661, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/cb088467176847109c0605b1d39d469c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=41.3 K
2024-12-06T10:17:01,994 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 250670863cb64b3db3b22dba6ac0e464, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733480220520
2024-12-06T10:17:01,995 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32362215a25f4a30a943f96d2556b778, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733480220520
2024-12-06T10:17:01,995 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f8f489fc12e476fbc2ff7ad4184cb69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733480220545
2024-12-06T10:17:01,995 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f93097ac9394f4e83ca0f0c2c8de661, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733480220545
2024-12-06T10:17:01,996 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cb41f0ef1ffb4a28b396a7297574694e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733480221178
2024-12-06T10:17:01,996 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb088467176847109c0605b1d39d469c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733480221178
2024-12-06T10:17:02,009 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:17:02,011 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/b01a7a033c9b43818214700631a18104 is 50, key is test_row_0/B:col10/1733480221807/Put/seqid=0
2024-12-06T10:17:02,015 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#64 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:17:02,016 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/857ff39530824cd78ad83926b85d6fb2 is 50, key is test_row_0/A:col10/1733480221807/Put/seqid=0
2024-12-06T10:17:02,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741902_1078 (size=13017)
2024-12-06T10:17:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741903_1079 (size=13017)
2024-12-06T10:17:02,129 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:02,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-06T10:17:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:02,130 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-06T10:17:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:02,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:02,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:02,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:02,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:02,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:02,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/03d05db48d62407fa1967e561b01dd71 is 50, key is test_row_0/A:col10/1733480221834/Put/seqid=0 2024-12-06T10:17:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:02,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:02,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741904_1080 (size=12301) 2024-12-06T10:17:02,167 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/03d05db48d62407fa1967e561b01dd71 2024-12-06T10:17:02,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480282171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480282173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480282171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480282176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480282178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/978ef75d9cb54cff9129245634d6dc17 is 50, key is test_row_0/B:col10/1733480221834/Put/seqid=0 2024-12-06T10:17:02,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741905_1081 (size=12301) 2024-12-06T10:17:02,224 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/978ef75d9cb54cff9129245634d6dc17 2024-12-06T10:17:02,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/721379b8896e4d9aa870860966dc9c8f is 50, key is 
test_row_0/C:col10/1733480221834/Put/seqid=0 2024-12-06T10:17:02,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741906_1082 (size=12301) 2024-12-06T10:17:02,249 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/721379b8896e4d9aa870860966dc9c8f 2024-12-06T10:17:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/03d05db48d62407fa1967e561b01dd71 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/03d05db48d62407fa1967e561b01dd71 2024-12-06T10:17:02,266 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/03d05db48d62407fa1967e561b01dd71, entries=150, sequenceid=315, filesize=12.0 K 2024-12-06T10:17:02,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/978ef75d9cb54cff9129245634d6dc17 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/978ef75d9cb54cff9129245634d6dc17 2024-12-06T10:17:02,272 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/978ef75d9cb54cff9129245634d6dc17, entries=150, sequenceid=315, filesize=12.0 K 2024-12-06T10:17:02,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/721379b8896e4d9aa870860966dc9c8f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/721379b8896e4d9aa870860966dc9c8f 2024-12-06T10:17:02,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480282280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480282280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,282 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/721379b8896e4d9aa870860966dc9c8f, entries=150, sequenceid=315, filesize=12.0 K 2024-12-06T10:17:02,283 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for b58170106b3730174deb9625aeac23df in 153ms, sequenceid=315, compaction requested=true 2024-12-06T10:17:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
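[editor's note] The flush that finishes here (pid=21) and the new one requested just below (pid=22) correspond to client-initiated table flushes that the master runs as a FlushTableProcedure with one FlushRegionProcedure per region. A minimal sketch of issuing such a flush through the public Admin API follows; it assumes an HBase client on the classpath and a reachable cluster, and reuses the table name from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush all regions of the table; on the master side this shows up as a
      // FlushTableProcedure plus FlushRegionProcedure subprocedures, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}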
2024-12-06T10:17:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-06T10:17:02,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-06T10:17:02,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:02,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-06T10:17:02,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-06T10:17:02,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0830 sec 2024-12-06T10:17:02,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:02,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:02,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:02,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:02,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:02,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:02,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0930 sec 2024-12-06T10:17:02,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/265cdb6536054b4da97d5ca72c549230 is 50, key is test_row_0/A:col10/1733480222288/Put/seqid=0 2024-12-06T10:17:02,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T10:17:02,306 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-06T10:17:02,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:02,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-06T10:17:02,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T10:17:02,311 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:02,312 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:02,312 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:02,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741907_1083 (size=17181) 2024-12-06T10:17:02,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480282328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480282333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480282333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T10:17:02,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480282439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480282439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480282439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,447 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/b01a7a033c9b43818214700631a18104 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b01a7a033c9b43818214700631a18104 2024-12-06T10:17:02,449 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/857ff39530824cd78ad83926b85d6fb2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/857ff39530824cd78ad83926b85d6fb2 2024-12-06T10:17:02,461 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 857ff39530824cd78ad83926b85d6fb2(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
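[editor's note] The repeated RegionTooBusyException responses above are the region server rejecting writes while its memstore is over the blocking limit; they clear once the flush drains the memstore. The HBase client normally retries these internally, but the sketch below shows the idea explicitly: catch the exception on a put and back off before retrying. The method name, retry count, and backoff policy are illustrative assumptions, not taken from this test.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetryExample {
  // Retry a put a few times when the server answers with RegionTooBusyException
  // ("Over memstore limit" in the log), doubling the wait between attempts.
  static void putWithBackoff(Connection connection, Put put) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs); // give the in-flight flush time to free memstore space
          backoffMs *= 2;
        }
      }
      throw new java.io.IOException("region still too busy after retries");
    }
  }
}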
2024-12-06T10:17:02,461 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:02,461 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480221991; duration=0sec 2024-12-06T10:17:02,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:02,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:02,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:02,464 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,464 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:02,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T10:17:02,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,465 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:02,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:02,465 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:02,465 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3b3fe211d914436bb3657b4e5b9abf41, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/22d53536dd1d4f38802c010ded8adf15, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/149615e2a4e848e0b97e6efac21429d2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/721379b8896e4d9aa870860966dc9c8f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=48.5 K 2024-12-06T10:17:02,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:02,466 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b3fe211d914436bb3657b4e5b9abf41, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733480220520 2024-12-06T10:17:02,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:02,467 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22d53536dd1d4f38802c010ded8adf15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733480220545 2024-12-06T10:17:02,468 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 149615e2a4e848e0b97e6efac21429d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733480221178 2024-12-06T10:17:02,469 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 721379b8896e4d9aa870860966dc9c8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733480221834 2024-12-06T10:17:02,470 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into b01a7a033c9b43818214700631a18104(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:02,470 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:02,471 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480221991; duration=0sec 2024-12-06T10:17:02,471 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:02,471 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:02,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480282484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,486 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:02,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480282484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,487 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8d66d30d86d94f469af041280f319d3b is 50, key is test_row_0/C:col10/1733480221834/Put/seqid=0 2024-12-06T10:17:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741908_1084 (size=13051) 2024-12-06T10:17:02,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T10:17:02,618 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T10:17:02,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:02,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480282643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480282644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480282645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/265cdb6536054b4da97d5ca72c549230 2024-12-06T10:17:02,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/86b5691360ff4303bf0720dfe8e618e7 is 50, key is test_row_0/B:col10/1733480222288/Put/seqid=0 2024-12-06T10:17:02,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741909_1085 (size=12301) 2024-12-06T10:17:02,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/86b5691360ff4303bf0720dfe8e618e7 2024-12-06T10:17:02,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/ed7b37c940b94ba1abd0602676455c8b is 50, key is test_row_0/C:col10/1733480222288/Put/seqid=0 2024-12-06T10:17:02,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741910_1086 (size=12301) 2024-12-06T10:17:02,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T10:17:02,772 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:02,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
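The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit. As an illustration only (not part of the test harness), the sketch below shows how a plain HBase client could be configured to ride out such rejections by retrying with a pause. The class name BusyRegionWriter and the retry/pause values are hypothetical; the table name, row key, family and qualifier are the ones visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Give the client room to ride out RegionTooBusyException while the
    // region flushes: a larger retry budget and a longer pause (hypothetical values).
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 200); // milliseconds between attempts

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is retried inside the client; it only reaches
      // the caller once the retry budget is exhausted.
      table.put(put);
    }
  }
}

The client already treats RegionTooBusyException as retriable with default settings; raising the budget only matters when a region stays blocked for longer than the total retry window, as happens here while the flush is in progress.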
2024-12-06T10:17:02,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480282788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480282790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,901 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8d66d30d86d94f469af041280f319d3b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8d66d30d86d94f469af041280f319d3b 2024-12-06T10:17:02,908 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 8d66d30d86d94f469af041280f319d3b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:02,909 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:02,909 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=12, startTime=1733480221991; duration=0sec 2024-12-06T10:17:02,909 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:02,909 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T10:17:02,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T10:17:02,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
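The pid=23 failures above show the master-driven flush procedure being rejected on the region server ("NOT flushing ... as already flushing") and then re-dispatched. As a minimal sketch, assuming a reachable cluster, a flush of this table can be requested through the public Admin API; the class name FlushRequester is hypothetical, and the test itself drives the flush through its own code, which is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The server side executes FlushRegionCallable for each region (pid=23
      // in the log above); if a region is already flushing, the callable fails
      // with "Unable to complete flush" and the procedure is retried later.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}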
2024-12-06T10:17:02,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:02,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:02,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
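The "Over memstore limit=512.0 K" figure is the per-region blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values below are hypothetical (a 128 KB flush size with a multiplier of 4 happens to yield the 512 KB seen in the log); the settings this test actually uses are not shown in the excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: blocking limit = flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to a region are rejected with RegionTooBusyException above this size.
    System.out.println("Writes block above ~" + blockingLimit + " bytes per region");
  }
}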
2024-12-06T10:17:02,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:02,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480282947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480282950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:02,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:02,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480282950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,084 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T10:17:03,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:03,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:03,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:03,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:03,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:03,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:03,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/ed7b37c940b94ba1abd0602676455c8b 2024-12-06T10:17:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/265cdb6536054b4da97d5ca72c549230 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/265cdb6536054b4da97d5ca72c549230 2024-12-06T10:17:03,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/265cdb6536054b4da97d5ca72c549230, entries=250, sequenceid=336, filesize=16.8 K 2024-12-06T10:17:03,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/86b5691360ff4303bf0720dfe8e618e7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/86b5691360ff4303bf0720dfe8e618e7 2024-12-06T10:17:03,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/86b5691360ff4303bf0720dfe8e618e7, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T10:17:03,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/ed7b37c940b94ba1abd0602676455c8b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/ed7b37c940b94ba1abd0602676455c8b 2024-12-06T10:17:03,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/ed7b37c940b94ba1abd0602676455c8b, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T10:17:03,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for b58170106b3730174deb9625aeac23df in 912ms, sequenceid=336, compaction requested=true 2024-12-06T10:17:03,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:03,200 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:03,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:03,201 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:03,202 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:03,202 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:03,202 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/857ff39530824cd78ad83926b85d6fb2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/03d05db48d62407fa1967e561b01dd71, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/265cdb6536054b4da97d5ca72c549230] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=41.5 K 2024-12-06T10:17:03,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:03,202 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:03,203 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 857ff39530824cd78ad83926b85d6fb2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733480221178 2024-12-06T10:17:03,203 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03d05db48d62407fa1967e561b01dd71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733480221834 2024-12-06T10:17:03,204 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:03,204 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:03,204 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:03,204 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b01a7a033c9b43818214700631a18104, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/978ef75d9cb54cff9129245634d6dc17, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/86b5691360ff4303bf0720dfe8e618e7] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.7 K 2024-12-06T10:17:03,204 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 265cdb6536054b4da97d5ca72c549230, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480222164 2024-12-06T10:17:03,205 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b01a7a033c9b43818214700631a18104, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733480221178 2024-12-06T10:17:03,206 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 978ef75d9cb54cff9129245634d6dc17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733480221834 2024-12-06T10:17:03,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:03,206 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 86b5691360ff4303bf0720dfe8e618e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480222164 2024-12-06T10:17:03,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:03,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:03,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:03,226 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:03,226 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:03,227 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/ed8e44593bb74a74aa8f8992a6cff723 is 50, key is test_row_0/A:col10/1733480222288/Put/seqid=0 2024-12-06T10:17:03,229 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/656296dee1b241ffbc152db35de344ce is 50, key is test_row_0/B:col10/1733480222288/Put/seqid=0 2024-12-06T10:17:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741911_1087 (size=13119) 2024-12-06T10:17:03,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741912_1088 (size=13119) 2024-12-06T10:17:03,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T10:17:03,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
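The compaction entries above show the ExploringCompactionPolicy selecting three files per store and the PressureAwareThroughputController pacing the rewrite. As a sketch under the assumption of a running cluster, a compaction can also be requested and observed from the Admin API; the class name CompactionProbe is hypothetical, and the file selection itself always remains up to the server-side policy seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Request a major compaction; the region server queues it and the
      // compaction policy decides which store files actually get merged.
      admin.majorCompact(table);
      // Poll the aggregate compaction state of the table's regions.
      CompactionState state = admin.getCompactionState(table);
      System.out.println("Compaction state: " + state);
    }
  }
}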
2024-12-06T10:17:03,239 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T10:17:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:03,245 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/656296dee1b241ffbc152db35de344ce as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/656296dee1b241ffbc152db35de344ce 2024-12-06T10:17:03,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/ed8e44593bb74a74aa8f8992a6cff723 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/ed8e44593bb74a74aa8f8992a6cff723 2024-12-06T10:17:03,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/512a0fd606244571a39b85a30fd299b4 is 50, key is test_row_0/A:col10/1733480222321/Put/seqid=0 2024-12-06T10:17:03,257 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 656296dee1b241ffbc152db35de344ce(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
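Once the flush commits the A, B and C store files, the row written by the test (test_row_0, qualifier col10) is readable from all three families. The sketch below is one way such a read-back could look from a plain client; it is not the test's own verification code, and the class name ReadBackRow is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Fetch the row across the three column families seen in the log so the
      // values can be compared for cross-family consistency.
      Get get = new Get(Bytes.toBytes("test_row_0"));
      get.addFamily(Bytes.toBytes("A"));
      get.addFamily(Bytes.toBytes("B"));
      get.addFamily(Bytes.toBytes("C"));
      Result result = table.get(get);
      System.out.println("Cells returned: " + result.size());
    }
  }
}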
2024-12-06T10:17:03,258 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:03,258 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480223202; duration=0sec 2024-12-06T10:17:03,258 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:03,258 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:03,258 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-06T10:17:03,259 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into ed8e44593bb74a74aa8f8992a6cff723(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:03,259 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:03,259 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480223200; duration=0sec 2024-12-06T10:17:03,259 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:03,259 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:03,261 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:17:03,261 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-06T10:17:03,261 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
because compaction request was cancelled 2024-12-06T10:17:03,261 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741913_1089 (size=12301) 2024-12-06T10:17:03,266 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/512a0fd606244571a39b85a30fd299b4 2024-12-06T10:17:03,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/072a8e0596e044fe81a5bddc9ddcb1e3 is 50, key is test_row_0/B:col10/1733480222321/Put/seqid=0 2024-12-06T10:17:03,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:03,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:03,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741914_1090 (size=12301) 2024-12-06T10:17:03,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480283345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480283345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T10:17:03,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480283450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480283450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480283452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480283455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480283457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480283654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480283654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,715 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/072a8e0596e044fe81a5bddc9ddcb1e3 2024-12-06T10:17:03,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/c7e5249d0c724e20b873e52bf53b0527 is 50, key is test_row_0/C:col10/1733480222321/Put/seqid=0 2024-12-06T10:17:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741915_1091 (size=12301) 2024-12-06T10:17:03,741 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/c7e5249d0c724e20b873e52bf53b0527 2024-12-06T10:17:03,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/512a0fd606244571a39b85a30fd299b4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/512a0fd606244571a39b85a30fd299b4 2024-12-06T10:17:03,758 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/512a0fd606244571a39b85a30fd299b4, entries=150, sequenceid=355, filesize=12.0 K 2024-12-06T10:17:03,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/072a8e0596e044fe81a5bddc9ddcb1e3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/072a8e0596e044fe81a5bddc9ddcb1e3 2024-12-06T10:17:03,773 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/072a8e0596e044fe81a5bddc9ddcb1e3, entries=150, sequenceid=355, filesize=12.0 K 2024-12-06T10:17:03,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/c7e5249d0c724e20b873e52bf53b0527 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c7e5249d0c724e20b873e52bf53b0527 2024-12-06T10:17:03,786 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c7e5249d0c724e20b873e52bf53b0527, entries=150, sequenceid=355, filesize=12.0 K 2024-12-06T10:17:03,788 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for b58170106b3730174deb9625aeac23df in 550ms, sequenceid=355, compaction requested=true 2024-12-06T10:17:03,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:03,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
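The records above show the pid=23 region flush committing the A, B and C store files and finishing (~87.22 KB flushed in 550 ms, with a follow-up compaction requested). In this run the flush is driven from the master as a FlushTableProcedure with a FlushRegionProcedure subprocedure, and a later HBaseAdmin record in this log ("Operation: FLUSH ... procId: 22 completed") shows it was initiated through the admin API. As a minimal sketch only (the table name is taken from the log, everything else is illustrative, and a reachable HBase 2.x cluster is assumed), such a flush is typically requested from client or test code like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath; assumed to point at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush every region of the table. On the master this shows up as a
      // FlushTableProcedure with one FlushRegionProcedure per region, matching
      // the pid=22 / pid=23 records in the surrounding log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```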
2024-12-06T10:17:03,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23
2024-12-06T10:17:03,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=23
2024-12-06T10:17:03,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22
2024-12-06T10:17:03,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4780 sec
2024-12-06T10:17:03,794 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.4840 sec
2024-12-06T10:17:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df
2024-12-06T10:17:03,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB
2024-12-06T10:17:03,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A
2024-12-06T10:17:03,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:03,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B
2024-12-06T10:17:03,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:03,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C
2024-12-06T10:17:03,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:03,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fa0dfd3ce8914714ad27e99972d327be is 50, key is test_row_0/A:col10/1733480223959/Put/seqid=0
2024-12-06T10:17:03,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741916_1092 (size=14741)
2024-12-06T10:17:03,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fa0dfd3ce8914714ad27e99972d327be
2024-12-06T10:17:03,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480283976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:03,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480283978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:03,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/de58abee26c5402a80237519b3e00e40 is 50, key is test_row_0/B:col10/1733480223959/Put/seqid=0 2024-12-06T10:17:03,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741917_1093 (size=12301) 2024-12-06T10:17:03,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/de58abee26c5402a80237519b3e00e40 2024-12-06T10:17:04,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/63014b52b6504c27940c7351de9eb356 is 50, key is test_row_0/C:col10/1733480223959/Put/seqid=0 2024-12-06T10:17:04,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741918_1094 (size=12301) 2024-12-06T10:17:04,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/63014b52b6504c27940c7351de9eb356 2024-12-06T10:17:04,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fa0dfd3ce8914714ad27e99972d327be as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa0dfd3ce8914714ad27e99972d327be 2024-12-06T10:17:04,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa0dfd3ce8914714ad27e99972d327be, entries=200, sequenceid=379, filesize=14.4 K 
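The repeated RegionTooBusyException records here are the region server refusing further Mutate calls while the region's memstore is above its blocking threshold; this exception is retriable, so the HBase client normally backs off and retries rather than failing immediately, and the interleaved MemStoreFlusher records show the server draining the memstore in parallel. The blocking threshold is the per-region flush size multiplied by the block multiplier, so the "Over memstore limit=512.0 K" figure indicates this test runs with a much smaller flush size than the production default of 128 MB (the test's exact settings are not shown in this excerpt). A minimal sketch, assuming only the two standard configuration keys, of how a 512 K blocking limit could be produced:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region memstore flush threshold. The production default is 128 MB;
    // an illustrative 128 KB is used here purely to reproduce the 512 K figure.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Writes are rejected with RegionTooBusyException once the region's memstore
    // exceeds flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 = 512 K
  }
}
```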
2024-12-06T10:17:04,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/de58abee26c5402a80237519b3e00e40 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/de58abee26c5402a80237519b3e00e40 2024-12-06T10:17:04,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/de58abee26c5402a80237519b3e00e40, entries=150, sequenceid=379, filesize=12.0 K 2024-12-06T10:17:04,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/63014b52b6504c27940c7351de9eb356 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/63014b52b6504c27940c7351de9eb356 2024-12-06T10:17:04,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/63014b52b6504c27940c7351de9eb356, entries=150, sequenceid=379, filesize=12.0 K 2024-12-06T10:17:04,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for b58170106b3730174deb9625aeac23df in 86ms, sequenceid=379, compaction requested=true 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:04,046 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:04,046 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:04,046 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:04,047 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:04,048 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:04,048 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:04,048 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:04,048 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:04,048 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:04,048 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/656296dee1b241ffbc152db35de344ce, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/072a8e0596e044fe81a5bddc9ddcb1e3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/de58abee26c5402a80237519b3e00e40] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=36.8 K 2024-12-06T10:17:04,048 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/ed8e44593bb74a74aa8f8992a6cff723, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/512a0fd606244571a39b85a30fd299b4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa0dfd3ce8914714ad27e99972d327be] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=39.2 K 2024-12-06T10:17:04,049 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed8e44593bb74a74aa8f8992a6cff723, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480222164 2024-12-06T10:17:04,049 DEBUG 
[RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 512a0fd606244571a39b85a30fd299b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733480222320 2024-12-06T10:17:04,049 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 656296dee1b241ffbc152db35de344ce, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480222164 2024-12-06T10:17:04,050 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 072a8e0596e044fe81a5bddc9ddcb1e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733480222320 2024-12-06T10:17:04,050 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa0dfd3ce8914714ad27e99972d327be, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733480223316 2024-12-06T10:17:04,051 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting de58abee26c5402a80237519b3e00e40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733480223316 2024-12-06T10:17:04,062 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:04,063 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/b29606ead712487fb468bc685526f5bb is 50, key is test_row_0/A:col10/1733480223959/Put/seqid=0 2024-12-06T10:17:04,074 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:04,075 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/ab3d4085edbe4e8faf8427763a14a4a8 is 50, key is test_row_0/B:col10/1733480223959/Put/seqid=0 2024-12-06T10:17:04,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741919_1095 (size=13221) 2024-12-06T10:17:04,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:04,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:04,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:04,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:04,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:04,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:04,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:04,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:04,091 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/b29606ead712487fb468bc685526f5bb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b29606ead712487fb468bc685526f5bb 2024-12-06T10:17:04,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741920_1096 (size=13221) 2024-12-06T10:17:04,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/71c720d63d934fa5ba59419363e07690 is 50, key is test_row_0/A:col10/1733480223971/Put/seqid=0 2024-12-06T10:17:04,104 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into b29606ead712487fb468bc685526f5bb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
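At this point the ExploringCompactionPolicy has selected all three newly flushed files per store for a minor compaction ("3 store files, 0 compacting, 3 eligible, 16 blocking"), and the PressureAwareThroughputController reports the compaction write rate against its 50 MB/second limit. The sketch below lists the standard knobs behind that selection; the values shown are the usual defaults, given here for orientation rather than as confirmed settings of this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionPolicyConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum number of store files before a minor compaction may be selected;
    // with the default of 3, the three flushed files per store become eligible.
    conf.setInt("hbase.hstore.compaction.min", 3);

    // Upper bound on how many files a single minor compaction will include.
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Size ratio used by the exploring policy when weighing candidate permutations
    // ("considering 1 permutations with 1 in ratio" in the log above).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    // Writes to a store are blocked once it accumulates this many files;
    // matches the "16 blocking" figure reported by SortedCompactionPolicy.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}
```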
2024-12-06T10:17:04,105 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:04,105 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480224046; duration=0sec 2024-12-06T10:17:04,105 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:04,105 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:04,105 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:04,109 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/ab3d4085edbe4e8faf8427763a14a4a8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/ab3d4085edbe4e8faf8427763a14a4a8 2024-12-06T10:17:04,109 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:04,109 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:04,109 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
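Once the A-store compaction completes, the same short-compactions thread immediately selects the next candidate, the four accumulated C-store files described in the following records. For completeness, a minimal sketch (HBase 2.x Admin API; the table name is from the log, the polling loop is illustrative) of requesting a compaction and waiting for the region server to report it finished, as a test might do before verifying on-disk state:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask for a minor compaction of every store of the table; the region server
      // queues it just like the flusher-triggered requests seen in the log.
      admin.compact(table);

      // Poll until no compaction is reported as in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(100);
      }
    }
  }
}
```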
2024-12-06T10:17:04,109 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8d66d30d86d94f469af041280f319d3b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/ed7b37c940b94ba1abd0602676455c8b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c7e5249d0c724e20b873e52bf53b0527, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/63014b52b6504c27940c7351de9eb356] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=48.8 K 2024-12-06T10:17:04,110 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d66d30d86d94f469af041280f319d3b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733480221834 2024-12-06T10:17:04,111 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed7b37c940b94ba1abd0602676455c8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480222164 2024-12-06T10:17:04,112 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7e5249d0c724e20b873e52bf53b0527, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733480222320 2024-12-06T10:17:04,112 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63014b52b6504c27940c7351de9eb356, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733480223316 2024-12-06T10:17:04,118 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into ab3d4085edbe4e8faf8427763a14a4a8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:04,118 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:04,118 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480224046; duration=0sec 2024-12-06T10:17:04,118 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:04,118 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:04,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480284133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480284135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741921_1097 (size=12301) 2024-12-06T10:17:04,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/71c720d63d934fa5ba59419363e07690 2024-12-06T10:17:04,146 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#83 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:04,147 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/bfbcb8331c244c2a9978127a5740bd52 is 50, key is test_row_0/C:col10/1733480223959/Put/seqid=0 2024-12-06T10:17:04,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/516c9d5363fe49e88807697ae8097a79 is 50, key is test_row_0/B:col10/1733480223971/Put/seqid=0 2024-12-06T10:17:04,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741922_1098 (size=13187) 2024-12-06T10:17:04,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741923_1099 (size=12301) 2024-12-06T10:17:04,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/516c9d5363fe49e88807697ae8097a79 2024-12-06T10:17:04,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/738c3d438c414e189e039d49a6ef915e is 50, key is test_row_0/C:col10/1733480223971/Put/seqid=0 2024-12-06T10:17:04,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741924_1100 (size=12301) 2024-12-06T10:17:04,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480284237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480284238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T10:17:04,415 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-06T10:17:04,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:04,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-06T10:17:04,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T10:17:04,420 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:04,421 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:04,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:04,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480284441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480284442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480284457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480284463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480284466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T10:17:04,573 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-06T10:17:04,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:04,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:04,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:04,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:04,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:04,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:04,587 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/bfbcb8331c244c2a9978127a5740bd52 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/bfbcb8331c244c2a9978127a5740bd52 2024-12-06T10:17:04,595 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into bfbcb8331c244c2a9978127a5740bd52(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
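Editor's note on the repeated RegionTooBusyException entries above: the region rejects Mutate calls because its memstore has grown past the blocking limit (512.0 K in this test's configuration) while a flush is still draining it. The standard HBase client normally retries this exception through its built-in retry logic; the sketch below only illustrates doing the same backoff explicitly. It is a minimal sketch, not part of the test: the row key, family "A", and qualifier "col10" are taken from the log, while the retry count and backoff values are assumed for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                 // illustrative starting backoff, not a tuned value
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);               // may fail while the memstore is over its blocking limit
                    return;                       // write accepted once the flush has freed space
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs);      // give the in-flight flush time to drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}
```

In the log, the client keeps resubmitting (rising callId values on the same connections) until the flush at sequenceid=394 completes, which is the same pattern the sketch expresses explicitly.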
2024-12-06T10:17:04,595 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:04,596 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=12, startTime=1733480224046; duration=0sec 2024-12-06T10:17:04,596 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:04,596 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:04,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/738c3d438c414e189e039d49a6ef915e 2024-12-06T10:17:04,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/71c720d63d934fa5ba59419363e07690 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/71c720d63d934fa5ba59419363e07690 2024-12-06T10:17:04,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/71c720d63d934fa5ba59419363e07690, entries=150, sequenceid=394, filesize=12.0 K 2024-12-06T10:17:04,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/516c9d5363fe49e88807697ae8097a79 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/516c9d5363fe49e88807697ae8097a79 2024-12-06T10:17:04,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/516c9d5363fe49e88807697ae8097a79, entries=150, sequenceid=394, filesize=12.0 K 2024-12-06T10:17:04,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/738c3d438c414e189e039d49a6ef915e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/738c3d438c414e189e039d49a6ef915e 2024-12-06T10:17:04,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/738c3d438c414e189e039d49a6ef915e, entries=150, sequenceid=394, filesize=12.0 K 2024-12-06T10:17:04,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for b58170106b3730174deb9625aeac23df in 574ms, sequenceid=394, compaction requested=false 2024-12-06T10:17:04,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:04,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T10:17:04,727 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-06T10:17:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:04,728 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T10:17:04,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:04,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:04,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:04,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:04,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:04,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:04,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6d08b33d1714482c9a78c3bf04d865f8 is 50, key is test_row_0/A:col10/1733480224126/Put/seqid=0 2024-12-06T10:17:04,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to 
blk_1073741925_1101 (size=12301) 2024-12-06T10:17:04,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:04,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:04,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480284765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480284768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480284870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:04,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:04,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480284872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T10:17:05,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480285072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480285075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,143 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6d08b33d1714482c9a78c3bf04d865f8 2024-12-06T10:17:05,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/b90ebb3dc8df4af58e1aee44dabac07a is 50, key is test_row_0/B:col10/1733480224126/Put/seqid=0 2024-12-06T10:17:05,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741926_1102 (size=12301) 2024-12-06T10:17:05,167 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/b90ebb3dc8df4af58e1aee44dabac07a 2024-12-06T10:17:05,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/659ff57a17944e44b9a80f348e9579e6 is 50, key is test_row_0/C:col10/1733480224126/Put/seqid=0 2024-12-06T10:17:05,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741927_1103 (size=12301) 2024-12-06T10:17:05,187 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/659ff57a17944e44b9a80f348e9579e6 2024-12-06T10:17:05,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/6d08b33d1714482c9a78c3bf04d865f8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6d08b33d1714482c9a78c3bf04d865f8 2024-12-06T10:17:05,202 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6d08b33d1714482c9a78c3bf04d865f8, entries=150, sequenceid=418, filesize=12.0 K 2024-12-06T10:17:05,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/b90ebb3dc8df4af58e1aee44dabac07a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b90ebb3dc8df4af58e1aee44dabac07a 2024-12-06T10:17:05,209 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b90ebb3dc8df4af58e1aee44dabac07a, entries=150, sequenceid=418, filesize=12.0 K 2024-12-06T10:17:05,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/659ff57a17944e44b9a80f348e9579e6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/659ff57a17944e44b9a80f348e9579e6 2024-12-06T10:17:05,217 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/659ff57a17944e44b9a80f348e9579e6, entries=150, sequenceid=418, filesize=12.0 K 2024-12-06T10:17:05,219 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for b58170106b3730174deb9625aeac23df in 491ms, sequenceid=418, compaction requested=true 2024-12-06T10:17:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
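Editor's note on the flush procedure entries above: the admin request stores FlushTableProcedure pid=24, which spawns FlushRegionProcedure pid=25; the first remote attempt is rejected ("NOT flushing ... as already flushing") and reported back as a failed remote procedure, and the dispatcher retries it once the in-flight flush finishes. A minimal sketch of the client side of that interaction, assuming a reachable cluster whose configuration is on the classpath, could look like the following; the table name is the one used by this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table. In this log the
            // admin call is observed to wait on the master-side procedure, which is
            // why "Operation: FLUSH ... procId: NN completed" only appears after the
            // per-region subprocedure succeeds.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```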
2024-12-06T10:17:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-06T10:17:05,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-06T10:17:05,226 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 807 msec 2024-12-06T10:17:05,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-06T10:17:05,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 800 msec 2024-12-06T10:17:05,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:05,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T10:17:05,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:05,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:05,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:05,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:05,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:05,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:05,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9ccaa35752cc44c88e3a6cf73b915d4c is 50, key is test_row_0/A:col10/1733480224763/Put/seqid=0 2024-12-06T10:17:05,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741928_1104 (size=14741) 2024-12-06T10:17:05,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9ccaa35752cc44c88e3a6cf73b915d4c 2024-12-06T10:17:05,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/f3abf245f5734cf2bb04569c827d32a9 is 50, key is test_row_0/B:col10/1733480224763/Put/seqid=0 2024-12-06T10:17:05,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480285419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480285420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741929_1105 (size=12301) 2024-12-06T10:17:05,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/f3abf245f5734cf2bb04569c827d32a9 2024-12-06T10:17:05,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/39ba5771ca05407b8fc0bef2cab7f9de is 50, key is test_row_0/C:col10/1733480224763/Put/seqid=0 2024-12-06T10:17:05,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741930_1106 (size=12301) 2024-12-06T10:17:05,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/39ba5771ca05407b8fc0bef2cab7f9de 2024-12-06T10:17:05,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9ccaa35752cc44c88e3a6cf73b915d4c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9ccaa35752cc44c88e3a6cf73b915d4c 2024-12-06T10:17:05,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9ccaa35752cc44c88e3a6cf73b915d4c, entries=200, sequenceid=434, filesize=14.4 K 2024-12-06T10:17:05,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/f3abf245f5734cf2bb04569c827d32a9 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f3abf245f5734cf2bb04569c827d32a9 2024-12-06T10:17:05,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f3abf245f5734cf2bb04569c827d32a9, entries=150, sequenceid=434, filesize=12.0 K 2024-12-06T10:17:05,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/39ba5771ca05407b8fc0bef2cab7f9de as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/39ba5771ca05407b8fc0bef2cab7f9de 2024-12-06T10:17:05,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/39ba5771ca05407b8fc0bef2cab7f9de, entries=150, sequenceid=434, filesize=12.0 K 2024-12-06T10:17:05,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for b58170106b3730174deb9625aeac23df in 107ms, sequenceid=434, compaction requested=true 2024-12-06T10:17:05,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:05,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:05,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:05,486 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:05,486 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:05,489 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:05,489 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52564 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:05,489 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:05,489 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:05,489 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:05,489 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:05,489 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b29606ead712487fb468bc685526f5bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/71c720d63d934fa5ba59419363e07690, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6d08b33d1714482c9a78c3bf04d865f8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9ccaa35752cc44c88e3a6cf73b915d4c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=51.3 K 2024-12-06T10:17:05,489 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/ab3d4085edbe4e8faf8427763a14a4a8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/516c9d5363fe49e88807697ae8097a79, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b90ebb3dc8df4af58e1aee44dabac07a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f3abf245f5734cf2bb04569c827d32a9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=48.9 K 2024-12-06T10:17:05,490 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ab3d4085edbe4e8faf8427763a14a4a8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, 
earliestPutTs=1733480223316 2024-12-06T10:17:05,490 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b29606ead712487fb468bc685526f5bb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733480223316 2024-12-06T10:17:05,491 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 516c9d5363fe49e88807697ae8097a79, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733480223971 2024-12-06T10:17:05,491 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71c720d63d934fa5ba59419363e07690, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733480223971 2024-12-06T10:17:05,492 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b90ebb3dc8df4af58e1aee44dabac07a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733480224126 2024-12-06T10:17:05,492 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d08b33d1714482c9a78c3bf04d865f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733480224126 2024-12-06T10:17:05,492 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f3abf245f5734cf2bb04569c827d32a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733480224763 2024-12-06T10:17:05,492 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ccaa35752cc44c88e3a6cf73b915d4c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733480224749 2024-12-06T10:17:05,511 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#92 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:05,512 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/40862726ae484cdfa9378e2dadf82747 is 50, key is test_row_0/B:col10/1733480224763/Put/seqid=0 2024-12-06T10:17:05,513 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#93 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:05,514 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/618fa0af1291438ca45b7b0201ca03a8 is 50, key is test_row_0/A:col10/1733480224763/Put/seqid=0 2024-12-06T10:17:05,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741931_1107 (size=13357) 2024-12-06T10:17:05,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T10:17:05,523 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-06T10:17:05,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741932_1108 (size=13357) 2024-12-06T10:17:05,526 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/40862726ae484cdfa9378e2dadf82747 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/40862726ae484cdfa9378e2dadf82747 2024-12-06T10:17:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:05,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:05,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T10:17:05,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:05,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:05,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:05,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:05,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:05,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:05,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-06T10:17:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T10:17:05,529 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): 
pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:05,530 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:05,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:05,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0d9c3744b3824f9f8c431a6525b635b9 is 50, key is test_row_0/A:col10/1733480225525/Put/seqid=0 2024-12-06T10:17:05,541 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/618fa0af1291438ca45b7b0201ca03a8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/618fa0af1291438ca45b7b0201ca03a8 2024-12-06T10:17:05,542 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 40862726ae484cdfa9378e2dadf82747(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
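The records above show a client-requested flush of default:TestAcidGuarantees being turned into FlushTableProcedure pid=26 with a single FlushRegionProcedure subprocedure (pid=27), and an earlier flush (procId 24) completing on the client side. As a hedged sketch of how such a flush is typically issued from the Java client API — only the table name is taken from the log; the class and variable names here are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Requests a flush of every region of the table. On the master this
            // surfaces as a FlushTableProcedure with one FlushRegionProcedure
            // subprocedure per region, as in the records above (pid=26 / pid=27).
            admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
        }
    }
}

The client waits on the resulting table procedure, which is what the repeated "Checking to see if procedure is done pid=26" lines correspond to.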
2024-12-06T10:17:05,542 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:05,542 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=12, startTime=1733480225486; duration=0sec 2024-12-06T10:17:05,543 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:05,543 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:05,543 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:05,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:05,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:05,547 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:05,547 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/bfbcb8331c244c2a9978127a5740bd52, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/738c3d438c414e189e039d49a6ef915e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/659ff57a17944e44b9a80f348e9579e6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/39ba5771ca05407b8fc0bef2cab7f9de] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=48.9 K 2024-12-06T10:17:05,548 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting bfbcb8331c244c2a9978127a5740bd52, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733480223316 2024-12-06T10:17:05,549 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 738c3d438c414e189e039d49a6ef915e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733480223971 2024-12-06T10:17:05,550 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 659ff57a17944e44b9a80f348e9579e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=418, earliestPutTs=1733480224126 2024-12-06T10:17:05,551 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 39ba5771ca05407b8fc0bef2cab7f9de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733480224763 2024-12-06T10:17:05,552 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 618fa0af1291438ca45b7b0201ca03a8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:05,553 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:05,553 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=12, startTime=1733480225486; duration=0sec 2024-12-06T10:17:05,553 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:05,554 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:05,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480285558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480285562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,567 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#95 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:05,567 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/be8dd653f7eb4772b9fdc8219c1f200d is 50, key is test_row_0/C:col10/1733480224763/Put/seqid=0 2024-12-06T10:17:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741933_1109 (size=14741) 2024-12-06T10:17:05,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0d9c3744b3824f9f8c431a6525b635b9 2024-12-06T10:17:05,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/078bf0d48a234dbfab0418619e0b7b26 is 50, key is test_row_0/B:col10/1733480225525/Put/seqid=0 2024-12-06T10:17:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741934_1110 (size=13323) 2024-12-06T10:17:05,609 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/be8dd653f7eb4772b9fdc8219c1f200d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/be8dd653f7eb4772b9fdc8219c1f200d 2024-12-06T10:17:05,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741935_1111 (size=12301) 2024-12-06T10:17:05,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/078bf0d48a234dbfab0418619e0b7b26 2024-12-06T10:17:05,620 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into be8dd653f7eb4772b9fdc8219c1f200d(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
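The lines above trace a full minor compaction cycle for store C of region b58170106b3730174deb9625aeac23df: the ExploringCompactionPolicy selects all four eligible files, the PressureAwareThroughputController caps write throughput at 50 MB/s, and the result is committed back into the store. For reference, a compaction of the same table can also be requested and observed from the client Admin API; the sketch below is illustrative only (HBase 2.x class locations assumed; nothing except the table name comes from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("default", "TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Queue a (minor) compaction for every store of the table.
            admin.compact(table);
            // Poll until the servers report no compaction in progress for the table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500);
            }
        }
    }
}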
2024-12-06T10:17:05,620 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:05,620 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=12, startTime=1733480225487; duration=0sec 2024-12-06T10:17:05,620 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:05,620 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:05,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T10:17:05,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8584c6885f2c4a9a883142608020f645 is 50, key is test_row_0/C:col10/1733480225525/Put/seqid=0 2024-12-06T10:17:05,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741936_1112 (size=12301) 2024-12-06T10:17:05,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480285662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480285666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,682 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T10:17:05,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
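The RegionTooBusyException warnings above are the region server pushing back while the region's memstore is over its blocking limit. The exception is retriable, so a normally configured client pauses and re-sends the mutation rather than failing outright. A hedged sketch of the client-side knobs involved — the property names are standard HBase client settings, but the values are illustrative, not the ones this test run uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BusyRegionClientTuning {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only.
        conf.setInt("hbase.client.retries.number", 15);         // attempts before giving up
        conf.setLong("hbase.client.pause", 100);                 // base pause (ms) between attempts
        conf.setInt("hbase.client.operation.timeout", 120000);   // overall per-operation budget (ms)

        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            // Puts issued through connection.getTable(...) now back off and retry
            // when a region answers RegionTooBusyException, instead of surfacing
            // the failure to the caller immediately.
        }
    }
}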
2024-12-06T10:17:05,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:05,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:05,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:05,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T10:17:05,836 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T10:17:05,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:05,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:05,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:05,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:05,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:05,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480285866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480285869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,990 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:05,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T10:17:05,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
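The 512.0 K figure in the "Over memstore limit" warnings is the per-region blocking threshold, which, to my understanding, is derived from the region memstore flush size multiplied by the memstore block multiplier; the test is evidently running with a deliberately tiny flush size to force this back-pressure. A small sketch of the two settings and the arithmetic, with illustrative values only (not necessarily what this test configures):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: the blocking limit is roughly flush.size * block.multiplier.
        // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" warnings above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
}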
2024-12-06T10:17:05,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:05,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:05,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:05,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:05,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:06,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8584c6885f2c4a9a883142608020f645 2024-12-06T10:17:06,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0d9c3744b3824f9f8c431a6525b635b9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0d9c3744b3824f9f8c431a6525b635b9 2024-12-06T10:17:06,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0d9c3744b3824f9f8c431a6525b635b9, entries=200, sequenceid=455, filesize=14.4 K 2024-12-06T10:17:06,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/078bf0d48a234dbfab0418619e0b7b26 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/078bf0d48a234dbfab0418619e0b7b26 2024-12-06T10:17:06,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/078bf0d48a234dbfab0418619e0b7b26, entries=150, sequenceid=455, filesize=12.0 K 2024-12-06T10:17:06,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8584c6885f2c4a9a883142608020f645 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8584c6885f2c4a9a883142608020f645 2024-12-06T10:17:06,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8584c6885f2c4a9a883142608020f645, entries=150, sequenceid=455, filesize=12.0 K 2024-12-06T10:17:06,074 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for b58170106b3730174deb9625aeac23df in 547ms, sequenceid=455, compaction requested=false 2024-12-06T10:17:06,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:06,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T10:17:06,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,144 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T10:17:06,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:06,145 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/8ed94023b79d4db2b3f57a5574d97ec5 is 50, key is test_row_0/A:col10/1733480225550/Put/seqid=0 2024-12-06T10:17:06,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741937_1113 (size=12301) 2024-12-06T10:17:06,166 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/8ed94023b79d4db2b3f57a5574d97ec5 2024-12-06T10:17:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:06,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
as already flushing 2024-12-06T10:17:06,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/cee88b2fe49946df96ca7732a8d48b80 is 50, key is test_row_0/B:col10/1733480225550/Put/seqid=0 2024-12-06T10:17:06,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480286201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480286202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741938_1114 (size=12301) 2024-12-06T10:17:06,218 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/cee88b2fe49946df96ca7732a8d48b80 2024-12-06T10:17:06,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3e3c917afadd44ed803ab1dc8e8a92fa is 50, key is test_row_0/C:col10/1733480225550/Put/seqid=0 2024-12-06T10:17:06,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741939_1115 (size=12301) 2024-12-06T10:17:06,253 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3e3c917afadd44ed803ab1dc8e8a92fa 2024-12-06T10:17:06,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/8ed94023b79d4db2b3f57a5574d97ec5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/8ed94023b79d4db2b3f57a5574d97ec5 2024-12-06T10:17:06,270 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/8ed94023b79d4db2b3f57a5574d97ec5, entries=150, sequenceid=473, filesize=12.0 K 2024-12-06T10:17:06,272 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/cee88b2fe49946df96ca7732a8d48b80 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cee88b2fe49946df96ca7732a8d48b80 2024-12-06T10:17:06,279 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cee88b2fe49946df96ca7732a8d48b80, entries=150, sequenceid=473, filesize=12.0 K 2024-12-06T10:17:06,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3e3c917afadd44ed803ab1dc8e8a92fa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3e3c917afadd44ed803ab1dc8e8a92fa 2024-12-06T10:17:06,287 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3e3c917afadd44ed803ab1dc8e8a92fa, entries=150, sequenceid=473, filesize=12.0 K 2024-12-06T10:17:06,288 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for b58170106b3730174deb9625aeac23df in 144ms, sequenceid=473, compaction requested=true 2024-12-06T10:17:06,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:06,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
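The flush records above commit one file per column family (A, B and C), all at sequenceid=473, and the writer workload keeps touching the same row (test_row_0, qualifier col10) in all three families — the single-row, multi-family write pattern that the TestAcidGuarantees workload appears to exercise, since HBase applies a single Put atomically across the families of one row. A hedged illustration of such a write; the table, row, family and qualifier names come from the log, while the value and connection setup are made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("default", "TestAcidGuarantees"))) {
            byte[] value = Bytes.toBytes("hypothetical-value"); // placeholder payload
            // One Put touching families A, B and C of the same row; the row-level
            // atomicity of this mutation is the property the test is checking.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);
        }
    }
}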
2024-12-06T10:17:06,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-06T10:17:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-06T10:17:06,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-06T10:17:06,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 760 msec 2024-12-06T10:17:06,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 766 msec 2024-12-06T10:17:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:06,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:17:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9131a8dbfafd4ac3a779bfd512f9658a is 50, key is test_row_0/A:col10/1733480226201/Put/seqid=0 2024-12-06T10:17:06,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741940_1116 (size=14741) 2024-12-06T10:17:06,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9131a8dbfafd4ac3a779bfd512f9658a 2024-12-06T10:17:06,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/0749e4107b9d452c8cd2e0c157be3de3 is 50, key is test_row_0/B:col10/1733480226201/Put/seqid=0 2024-12-06T10:17:06,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741941_1117 
(size=12301) 2024-12-06T10:17:06,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480286331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/0749e4107b9d452c8cd2e0c157be3de3 2024-12-06T10:17:06,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480286331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/b248db06ea77457288ffd288cd445069 is 50, key is test_row_0/C:col10/1733480226201/Put/seqid=0 2024-12-06T10:17:06,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741942_1118 (size=12301) 2024-12-06T10:17:06,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480286435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480286438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480286466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,468 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:06,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480286469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,472 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:06,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480286473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,475 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T10:17:06,633 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-06T10:17:06,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-06T10:17:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T10:17:06,637 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:06,637 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:06,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:06,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480286637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480286641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T10:17:06,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/b248db06ea77457288ffd288cd445069 2024-12-06T10:17:06,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/9131a8dbfafd4ac3a779bfd512f9658a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9131a8dbfafd4ac3a779bfd512f9658a 2024-12-06T10:17:06,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9131a8dbfafd4ac3a779bfd512f9658a, entries=200, sequenceid=495, filesize=14.4 K 2024-12-06T10:17:06,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/0749e4107b9d452c8cd2e0c157be3de3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0749e4107b9d452c8cd2e0c157be3de3 2024-12-06T10:17:06,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0749e4107b9d452c8cd2e0c157be3de3, entries=150, sequenceid=495, filesize=12.0 K 2024-12-06T10:17:06,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/b248db06ea77457288ffd288cd445069 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b248db06ea77457288ffd288cd445069 2024-12-06T10:17:06,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b248db06ea77457288ffd288cd445069, entries=150, sequenceid=495, filesize=12.0 K 2024-12-06T10:17:06,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for b58170106b3730174deb9625aeac23df in 483ms, sequenceid=495, compaction requested=true 2024-12-06T10:17:06,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:06,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:06,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:06,789 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:06,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:06,791 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55140 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:06,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:06,792 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:06,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:06,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:06,792 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:06,792 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/618fa0af1291438ca45b7b0201ca03a8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0d9c3744b3824f9f8c431a6525b635b9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/8ed94023b79d4db2b3f57a5574d97ec5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9131a8dbfafd4ac3a779bfd512f9658a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=53.8 K 2024-12-06T10:17:06,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-06T10:17:06,790 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:06,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:06,793 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:06,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:06,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:06,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:06,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:06,800 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 618fa0af1291438ca45b7b0201ca03a8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733480224763 2024-12-06T10:17:06,801 DEBUG 
[RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d9c3744b3824f9f8c431a6525b635b9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733480225408 2024-12-06T10:17:06,802 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ed94023b79d4db2b3f57a5574d97ec5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1733480225544 2024-12-06T10:17:06,803 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:06,803 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:06,803 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:06,803 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/40862726ae484cdfa9378e2dadf82747, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/078bf0d48a234dbfab0418619e0b7b26, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cee88b2fe49946df96ca7732a8d48b80, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0749e4107b9d452c8cd2e0c157be3de3] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.1 K 2024-12-06T10:17:06,803 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9131a8dbfafd4ac3a779bfd512f9658a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733480226193 2024-12-06T10:17:06,804 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 40862726ae484cdfa9378e2dadf82747, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733480224763 2024-12-06T10:17:06,805 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 078bf0d48a234dbfab0418619e0b7b26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733480225408 2024-12-06T10:17:06,805 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cee88b2fe49946df96ca7732a8d48b80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1733480225544 2024-12-06T10:17:06,806 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0749e4107b9d452c8cd2e0c157be3de3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733480226198 
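The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entries above come from the ratio test the exploring compaction policy applies before admitting a candidate set: every file must be no larger than the configured ratio times the combined size of the other files. A minimal sketch of that rule in Java (this is not the HBase source; the 1.2 default ratio and the class/method names are illustrative assumptions):

import java.util.List;

// Illustrative sketch of the "files in ratio" test suggested by the
// "3 in ratio" log entries above; not the actual HBase implementation.
public final class RatioCheckSketch {

    // Assumed default of hbase.hstore.compaction.ratio.
    private static final double RATIO = 1.2;

    // A candidate set passes when every file is at most RATIO times the
    // combined size of the other files in the set.
    static boolean filesInRatio(List<Long> fileSizes) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > RATIO * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes (bytes) roughly matching the four A-store files above
        // (13.0 K, 14.4 K, 12.0 K, 14.4 K, totalling ~53.8 K).
        List<Long> sizes = List.of(13_312L, 14_745L, 12_288L, 14_745L);
        System.out.println("in ratio: " + filesInRatio(sizes)); // expected: true
    }
}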
2024-12-06T10:17:06,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/29126e5084024306a3c2df5317f369b2 is 50, key is test_row_0/A:col10/1733480226317/Put/seqid=0 2024-12-06T10:17:06,827 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:06,828 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/1812227c7c0148668e76bdd9f0c7466a is 50, key is test_row_0/A:col10/1733480226201/Put/seqid=0 2024-12-06T10:17:06,832 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:06,832 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/abf63d44b7a94ea6a820ecce97a33da9 is 50, key is test_row_0/B:col10/1733480226201/Put/seqid=0 2024-12-06T10:17:06,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741943_1119 (size=12301) 2024-12-06T10:17:06,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741944_1120 (size=13493) 2024-12-06T10:17:06,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741945_1121 (size=13493) 2024-12-06T10:17:06,865 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/1812227c7c0148668e76bdd9f0c7466a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1812227c7c0148668e76bdd9f0c7466a 2024-12-06T10:17:06,866 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/abf63d44b7a94ea6a820ecce97a33da9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/abf63d44b7a94ea6a820ecce97a33da9 2024-12-06T10:17:06,873 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/A of 
b58170106b3730174deb9625aeac23df into 1812227c7c0148668e76bdd9f0c7466a(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:06,873 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:06,873 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=12, startTime=1733480226789; duration=0sec 2024-12-06T10:17:06,873 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:06,873 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:06,873 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:06,876 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:06,876 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:06,876 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
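The "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" entries reflect the per-store file-count thresholds. A hedged sketch of the configuration keys involved; the values shown are common defaults, assumed rather than read from this test's actual hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: the store-file thresholds behind lines like
// "4 store files, 0 compacting, 4 eligible, 16 blocking" above.
public final class CompactionThresholdsSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Maximum number of files compacted in one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Once a store reaches this many files, writes are delayed until
        // compaction catches up (the "16 blocking" figure in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println(conf.get("hbase.hstore.blockingStoreFiles"));
    }
}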
2024-12-06T10:17:06,876 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/be8dd653f7eb4772b9fdc8219c1f200d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8584c6885f2c4a9a883142608020f645, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3e3c917afadd44ed803ab1dc8e8a92fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b248db06ea77457288ffd288cd445069] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.0 K 2024-12-06T10:17:06,877 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting be8dd653f7eb4772b9fdc8219c1f200d, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733480224763 2024-12-06T10:17:06,877 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8584c6885f2c4a9a883142608020f645, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733480225408 2024-12-06T10:17:06,877 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e3c917afadd44ed803ab1dc8e8a92fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1733480225544 2024-12-06T10:17:06,878 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into abf63d44b7a94ea6a820ecce97a33da9(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
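The minor compactions above were selected automatically by the region server; the same work can also be requested explicitly through the client Admin API. A minimal sketch, with cluster connection details assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: asking for a compaction of the test table from a client,
// analogous to what the region server queued on its own above.
public final class RequestCompactionSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.compact(table);       // queue a minor compaction
            admin.majorCompact(table);  // or rewrite every store file
        }
    }
}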
2024-12-06T10:17:06,878 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:06,878 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b248db06ea77457288ffd288cd445069, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733480226198 2024-12-06T10:17:06,878 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=12, startTime=1733480226789; duration=0sec 2024-12-06T10:17:06,878 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:06,878 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:06,891 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#107 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:06,892 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5159ffeb18b04a458191e8b194c0db91 is 50, key is test_row_0/C:col10/1733480226201/Put/seqid=0 2024-12-06T10:17:06,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741946_1122 (size=13459) 2024-12-06T10:17:06,910 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5159ffeb18b04a458191e8b194c0db91 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5159ffeb18b04a458191e8b194c0db91 2024-12-06T10:17:06,917 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 5159ffeb18b04a458191e8b194c0db91(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
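The PressureAwareThroughputController entries above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") show the compaction throughput throttle at work. A sketch of the bounds it is usually tuned with; the key names are what I believe the 2.x pressure-aware controller reads, so treat them as assumptions to verify against your HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: the pressure-aware compaction throughput controller interpolates
// between a lower and an upper MB/s bound depending on store-file pressure.
// Key names are assumed and should be double-checked for your version.
public final class CompactionThrottleSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
    }
}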
2024-12-06T10:17:06,917 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:06,917 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=12, startTime=1733480226792; duration=0sec 2024-12-06T10:17:06,918 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:06,919 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:06,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T10:17:06,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:06,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:06,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480286992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:06,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:06,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480286992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:07,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480287094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:07,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480287094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T10:17:07,239 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/29126e5084024306a3c2df5317f369b2 2024-12-06T10:17:07,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/45445a5985024dc7843c274e7fe253ab is 50, key is test_row_0/B:col10/1733480226317/Put/seqid=0 2024-12-06T10:17:07,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741947_1123 (size=12301) 2024-12-06T10:17:07,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480287296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480287297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480287599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480287599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:07,654 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/45445a5985024dc7843c274e7fe253ab 2024-12-06T10:17:07,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/afef44fc564043058eb987a3b755861c is 50, key is test_row_0/C:col10/1733480226317/Put/seqid=0 2024-12-06T10:17:07,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741948_1124 (size=12301) 2024-12-06T10:17:07,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T10:17:08,070 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/afef44fc564043058eb987a3b755861c 2024-12-06T10:17:08,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/29126e5084024306a3c2df5317f369b2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/29126e5084024306a3c2df5317f369b2 2024-12-06T10:17:08,094 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/29126e5084024306a3c2df5317f369b2, entries=150, sequenceid=510, filesize=12.0 K 2024-12-06T10:17:08,095 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/45445a5985024dc7843c274e7fe253ab as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/45445a5985024dc7843c274e7fe253ab 2024-12-06T10:17:08,101 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/45445a5985024dc7843c274e7fe253ab, entries=150, sequenceid=510, filesize=12.0 K 2024-12-06T10:17:08,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/afef44fc564043058eb987a3b755861c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/afef44fc564043058eb987a3b755861c 2024-12-06T10:17:08,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:08,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480288103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:08,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480288105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:08,110 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/afef44fc564043058eb987a3b755861c, entries=150, sequenceid=510, filesize=12.0 K 2024-12-06T10:17:08,115 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for b58170106b3730174deb9625aeac23df in 1323ms, sequenceid=510, compaction requested=false 2024-12-06T10:17:08,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:08,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
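The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back while its memstore drains; the expected client reaction is to back off and retry. A minimal, hand-rolled sketch of that behaviour (the stock HBase client normally retries this internally; the table name, row, and backoff numbers are illustrative only):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: retry a put with exponential backoff when the region reports
// RegionTooBusyException, mirroring the "Over memstore limit" warnings above.
public final class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // succeeded
                } catch (RegionTooBusyException busy) {
                    // Region is flushing/compacting; wait and try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}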
2024-12-06T10:17:08,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-06T10:17:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-06T10:17:08,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-06T10:17:08,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4790 sec 2024-12-06T10:17:08,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.4850 sec 2024-12-06T10:17:08,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T10:17:08,740 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-06T10:17:08,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-06T10:17:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T10:17:08,743 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:08,744 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:08,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T10:17:08,902 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:08,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-06T10:17:08,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
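The FlushTableProcedure entries above (pid=28 completed, pid=30 just stored for "Client=jenkins//172.17.0.2 flush TestAcidGuarantees") correspond to a client-issued table flush. A minimal sketch of issuing the same flush through the Admin API, with connection details assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: the admin-side flush that produces the FlushTableProcedure /
// FlushRegionProcedure pairs (pid=28/29, pid=30/31) seen in this log.
public final class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}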
2024-12-06T10:17:08,903 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:17:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:08,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/52c690c4c6ba44d2b4eee59eb4ada720 is 50, key is test_row_0/A:col10/1733480226990/Put/seqid=0 2024-12-06T10:17:08,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741949_1125 (size=12301) 2024-12-06T10:17:09,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T10:17:09,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:09,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:09,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480289131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480289133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480289234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480289235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,316 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/52c690c4c6ba44d2b4eee59eb4ada720 2024-12-06T10:17:09,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/768901de0e87453ca5fa60deb010d080 is 50, key is test_row_0/B:col10/1733480226990/Put/seqid=0 2024-12-06T10:17:09,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741950_1126 (size=12301) 2024-12-06T10:17:09,339 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/768901de0e87453ca5fa60deb010d080 2024-12-06T10:17:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T10:17:09,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/f2956fdd904a4e3ab6d0295f48dd7288 is 50, key is test_row_0/C:col10/1733480226990/Put/seqid=0 2024-12-06T10:17:09,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741951_1127 (size=12301) 2024-12-06T10:17:09,362 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=535 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/f2956fdd904a4e3ab6d0295f48dd7288 2024-12-06T10:17:09,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/52c690c4c6ba44d2b4eee59eb4ada720 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/52c690c4c6ba44d2b4eee59eb4ada720 2024-12-06T10:17:09,373 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/52c690c4c6ba44d2b4eee59eb4ada720, entries=150, sequenceid=535, filesize=12.0 K 2024-12-06T10:17:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/768901de0e87453ca5fa60deb010d080 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/768901de0e87453ca5fa60deb010d080 2024-12-06T10:17:09,380 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/768901de0e87453ca5fa60deb010d080, entries=150, sequenceid=535, filesize=12.0 K 2024-12-06T10:17:09,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/f2956fdd904a4e3ab6d0295f48dd7288 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f2956fdd904a4e3ab6d0295f48dd7288 2024-12-06T10:17:09,388 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f2956fdd904a4e3ab6d0295f48dd7288, entries=150, sequenceid=535, filesize=12.0 K 2024-12-06T10:17:09,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-06T10:17:09,389 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for b58170106b3730174deb9625aeac23df in 486ms, sequenceid=535, compaction requested=true 2024-12-06T10:17:09,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:09,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-06T10:17:09,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-06T10:17:09,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-06T10:17:09,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 647 msec 2024-12-06T10:17:09,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 651 msec 2024-12-06T10:17:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:09,440 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:09,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:09,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:09,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:09,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3de419a86c8044199977e32e0329576a is 50, key is test_row_0/A:col10/1733480229440/Put/seqid=0 2024-12-06T10:17:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741952_1128 (size=12301) 2024-12-06T10:17:09,457 INFO [MemStoreFlusher.0 
{}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3de419a86c8044199977e32e0329576a 2024-12-06T10:17:09,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/367f1f8fac5446ae9fa26bbefc092e50 is 50, key is test_row_0/B:col10/1733480229440/Put/seqid=0 2024-12-06T10:17:09,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741953_1129 (size=12301) 2024-12-06T10:17:09,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/367f1f8fac5446ae9fa26bbefc092e50 2024-12-06T10:17:09,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480289473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480289478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e29c251816bf47b68a84ffc868304d73 is 50, key is test_row_0/C:col10/1733480229440/Put/seqid=0 2024-12-06T10:17:09,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741954_1130 (size=12301) 2024-12-06T10:17:09,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e29c251816bf47b68a84ffc868304d73 2024-12-06T10:17:09,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3de419a86c8044199977e32e0329576a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3de419a86c8044199977e32e0329576a 2024-12-06T10:17:09,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3de419a86c8044199977e32e0329576a, entries=150, sequenceid=551, filesize=12.0 K 2024-12-06T10:17:09,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/367f1f8fac5446ae9fa26bbefc092e50 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/367f1f8fac5446ae9fa26bbefc092e50 2024-12-06T10:17:09,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/367f1f8fac5446ae9fa26bbefc092e50, entries=150, sequenceid=551, filesize=12.0 K 2024-12-06T10:17:09,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e29c251816bf47b68a84ffc868304d73 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e29c251816bf47b68a84ffc868304d73 2024-12-06T10:17:09,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e29c251816bf47b68a84ffc868304d73, entries=150, sequenceid=551, filesize=12.0 K 2024-12-06T10:17:09,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for b58170106b3730174deb9625aeac23df in 83ms, sequenceid=551, compaction requested=true 2024-12-06T10:17:09,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,523 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:09,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:09,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:09,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:09,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:09,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:09,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:09,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:09,525 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:09,526 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:09,526 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:09,526 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1812227c7c0148668e76bdd9f0c7466a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/29126e5084024306a3c2df5317f369b2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/52c690c4c6ba44d2b4eee59eb4ada720, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3de419a86c8044199977e32e0329576a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.2 K 2024-12-06T10:17:09,526 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1812227c7c0148668e76bdd9f0c7466a, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733480226198 2024-12-06T10:17:09,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29126e5084024306a3c2df5317f369b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1733480226317 2024-12-06T10:17:09,528 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:09,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:09,529 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:09,529 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/abf63d44b7a94ea6a820ecce97a33da9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/45445a5985024dc7843c274e7fe253ab, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/768901de0e87453ca5fa60deb010d080, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/367f1f8fac5446ae9fa26bbefc092e50] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.2 K 2024-12-06T10:17:09,529 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52c690c4c6ba44d2b4eee59eb4ada720, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1733480226987 2024-12-06T10:17:09,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting abf63d44b7a94ea6a820ecce97a33da9, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733480226198 2024-12-06T10:17:09,530 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3de419a86c8044199977e32e0329576a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1733480229123 2024-12-06T10:17:09,531 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 45445a5985024dc7843c274e7fe253ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1733480226317 2024-12-06T10:17:09,531 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 768901de0e87453ca5fa60deb010d080, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1733480226987 2024-12-06T10:17:09,532 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 367f1f8fac5446ae9fa26bbefc092e50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1733480229123 2024-12-06T10:17:09,552 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#116 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:09,553 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/edf460481040493c9309874c3f4fe556 is 50, key is test_row_0/A:col10/1733480229440/Put/seqid=0 2024-12-06T10:17:09,558 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:09,559 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/04970250b60b400b88c1e9121a5fa8a9 is 50, key is test_row_0/B:col10/1733480229440/Put/seqid=0 2024-12-06T10:17:09,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741956_1132 (size=13629) 2024-12-06T10:17:09,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:09,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T10:17:09,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:09,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:09,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:09,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,591 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/04970250b60b400b88c1e9121a5fa8a9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/04970250b60b400b88c1e9121a5fa8a9 2024-12-06T10:17:09,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741955_1131 (size=13629) 2024-12-06T10:17:09,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0e205bb780604619b07cb58e74251afa is 50, key is 
test_row_0/A:col10/1733480229459/Put/seqid=0 2024-12-06T10:17:09,602 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 04970250b60b400b88c1e9121a5fa8a9(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:09,602 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,602 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=12, startTime=1733480229524; duration=0sec 2024-12-06T10:17:09,602 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:09,602 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:09,602 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:09,604 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:09,604 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:09,604 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:09,604 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5159ffeb18b04a458191e8b194c0db91, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/afef44fc564043058eb987a3b755861c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f2956fdd904a4e3ab6d0295f48dd7288, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e29c251816bf47b68a84ffc868304d73] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.2 K 2024-12-06T10:17:09,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5159ffeb18b04a458191e8b194c0db91, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733480226198 2024-12-06T10:17:09,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting afef44fc564043058eb987a3b755861c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1733480226317 2024-12-06T10:17:09,606 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f2956fdd904a4e3ab6d0295f48dd7288, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1733480226987 2024-12-06T10:17:09,606 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e29c251816bf47b68a84ffc868304d73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1733480229123 2024-12-06T10:17:09,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741957_1133 (size=14741) 2024-12-06T10:17:09,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0e205bb780604619b07cb58e74251afa 2024-12-06T10:17:09,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480289614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/7a18f27548734de6979e8d990f1bec0f is 50, key is test_row_0/B:col10/1733480229459/Put/seqid=0 2024-12-06T10:17:09,622 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:09,622 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3a866253d32147cc86bfe304c6754c85 is 50, key is test_row_0/C:col10/1733480229440/Put/seqid=0 2024-12-06T10:17:09,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480289619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741959_1135 (size=13595) 2024-12-06T10:17:09,646 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/3a866253d32147cc86bfe304c6754c85 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3a866253d32147cc86bfe304c6754c85 2024-12-06T10:17:09,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741958_1134 (size=12301) 2024-12-06T10:17:09,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/7a18f27548734de6979e8d990f1bec0f 2024-12-06T10:17:09,656 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 3a866253d32147cc86bfe304c6754c85(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:09,658 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,658 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=12, startTime=1733480229525; duration=0sec 2024-12-06T10:17:09,658 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:09,658 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:09,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/c5dbf061a918442c9899064c80763b8a is 50, key is test_row_0/C:col10/1733480229459/Put/seqid=0 2024-12-06T10:17:09,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741960_1136 (size=12301) 2024-12-06T10:17:09,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/c5dbf061a918442c9899064c80763b8a 2024-12-06T10:17:09,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/0e205bb780604619b07cb58e74251afa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0e205bb780604619b07cb58e74251afa 2024-12-06T10:17:09,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0e205bb780604619b07cb58e74251afa, entries=200, sequenceid=573, filesize=14.4 K 2024-12-06T10:17:09,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/7a18f27548734de6979e8d990f1bec0f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/7a18f27548734de6979e8d990f1bec0f 2024-12-06T10:17:09,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/7a18f27548734de6979e8d990f1bec0f, entries=150, sequenceid=573, filesize=12.0 K 2024-12-06T10:17:09,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/c5dbf061a918442c9899064c80763b8a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c5dbf061a918442c9899064c80763b8a 2024-12-06T10:17:09,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c5dbf061a918442c9899064c80763b8a, entries=150, sequenceid=573, filesize=12.0 K 2024-12-06T10:17:09,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for b58170106b3730174deb9625aeac23df in 139ms, sequenceid=573, compaction requested=false 2024-12-06T10:17:09,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:09,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/428a296ec1374ff4b902c32736090686 is 50, key is test_row_0/A:col10/1733480229617/Put/seqid=0 2024-12-06T10:17:09,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741961_1137 (size=14741) 2024-12-06T10:17:09,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=591 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/428a296ec1374ff4b902c32736090686 2024-12-06T10:17:09,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/0105d4378c384f0083e7e65fe21a76df is 50, key is 
test_row_0/B:col10/1733480229617/Put/seqid=0 2024-12-06T10:17:09,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741962_1138 (size=12301) 2024-12-06T10:17:09,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=591 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/0105d4378c384f0083e7e65fe21a76df 2024-12-06T10:17:09,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480289806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480289806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/d2a8322969ea4184bd4e53e54506ecfb is 50, key is test_row_0/C:col10/1733480229617/Put/seqid=0 2024-12-06T10:17:09,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741963_1139 (size=12301) 2024-12-06T10:17:09,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T10:17:09,847 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-06T10:17:09,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=591 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/d2a8322969ea4184bd4e53e54506ecfb 2024-12-06T10:17:09,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:09,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-12-06T10:17:09,852 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-06T10:17:09,853 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:09,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-06T10:17:09,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/428a296ec1374ff4b902c32736090686 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/428a296ec1374ff4b902c32736090686 2024-12-06T10:17:09,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/428a296ec1374ff4b902c32736090686, entries=200, sequenceid=591, filesize=14.4 K 2024-12-06T10:17:09,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/0105d4378c384f0083e7e65fe21a76df as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0105d4378c384f0083e7e65fe21a76df 2024-12-06T10:17:09,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0105d4378c384f0083e7e65fe21a76df, entries=150, sequenceid=591, filesize=12.0 K 2024-12-06T10:17:09,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/d2a8322969ea4184bd4e53e54506ecfb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d2a8322969ea4184bd4e53e54506ecfb 2024-12-06T10:17:09,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d2a8322969ea4184bd4e53e54506ecfb, entries=150, sequenceid=591, filesize=12.0 K 2024-12-06T10:17:09,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for b58170106b3730174deb9625aeac23df in 162ms, sequenceid=591, compaction requested=true 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:09,889 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:17:09,890 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:17:09,890 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-06T10:17:09,890 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. because compaction request was cancelled 2024-12-06T10:17:09,890 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:09,890 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:09,893 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:09,894 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:09,894 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:09,894 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/04970250b60b400b88c1e9121a5fa8a9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/7a18f27548734de6979e8d990f1bec0f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0105d4378c384f0083e7e65fe21a76df] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=37.3 K 2024-12-06T10:17:09,894 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 04970250b60b400b88c1e9121a5fa8a9, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1733480229123 2024-12-06T10:17:09,895 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a18f27548734de6979e8d990f1bec0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733480229459 2024-12-06T10:17:09,896 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0105d4378c384f0083e7e65fe21a76df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=591, earliestPutTs=1733480229611 2024-12-06T10:17:09,906 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#125 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:09,906 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/33c1e9e6a0674489a8b4a0dcde53c659 is 50, key is test_row_0/B:col10/1733480229617/Put/seqid=0 2024-12-06T10:17:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:09,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:17:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:09,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/081e68a661bb470d8f7c20da9301e560 is 50, key is test_row_0/A:col10/1733480229915/Put/seqid=0 2024-12-06T10:17:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741964_1140 (size=13731) 2024-12-06T10:17:09,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480289944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480289946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:09,954 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/33c1e9e6a0674489a8b4a0dcde53c659 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/33c1e9e6a0674489a8b4a0dcde53c659 2024-12-06T10:17:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-06T10:17:09,969 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 33c1e9e6a0674489a8b4a0dcde53c659(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:09,970 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:09,970 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=13, startTime=1733480229889; duration=0sec 2024-12-06T10:17:09,970 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:09,970 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:09,970 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:09,974 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:09,974 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:09,974 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:09,975 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3a866253d32147cc86bfe304c6754c85, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c5dbf061a918442c9899064c80763b8a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d2a8322969ea4184bd4e53e54506ecfb] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=37.3 K 2024-12-06T10:17:09,975 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a866253d32147cc86bfe304c6754c85, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1733480229123 2024-12-06T10:17:09,976 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c5dbf061a918442c9899064c80763b8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733480229459 2024-12-06T10:17:09,976 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d2a8322969ea4184bd4e53e54506ecfb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=591, earliestPutTs=1733480229611 2024-12-06T10:17:09,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 
is added to blk_1073741965_1141 (size=14741) 2024-12-06T10:17:09,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=613 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/081e68a661bb470d8f7c20da9301e560 2024-12-06T10:17:09,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/4a913af414a74ab69b2300a2e92733f3 is 50, key is test_row_0/B:col10/1733480229915/Put/seqid=0 2024-12-06T10:17:09,993 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#128 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:09,994 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/246642f604cb44378303c9609f3c2d8a is 50, key is test_row_0/C:col10/1733480229617/Put/seqid=0 2024-12-06T10:17:10,001 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/edf460481040493c9309874c3f4fe556 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/edf460481040493c9309874c3f4fe556 2024-12-06T10:17:10,007 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-06T10:17:10,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:10,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. as already flushing 2024-12-06T10:17:10,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:10,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:10,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:10,010 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into edf460481040493c9309874c3f4fe556(size=13.3 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:10,010 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:10,010 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=12, startTime=1733480229523; duration=0sec 2024-12-06T10:17:10,010 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:10,010 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:10,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741966_1142 (size=13697) 2024-12-06T10:17:10,054 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/246642f604cb44378303c9609f3c2d8a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/246642f604cb44378303c9609f3c2d8a 2024-12-06T10:17:10,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741967_1143 (size=12301) 2024-12-06T10:17:10,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480290050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=613 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/4a913af414a74ab69b2300a2e92733f3 2024-12-06T10:17:10,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 315 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480290050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,066 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 246642f604cb44378303c9609f3c2d8a(size=13.4 K), total size for store is 13.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:10,066 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:10,066 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=13, startTime=1733480229889; duration=0sec 2024-12-06T10:17:10,066 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:10,066 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:10,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/adc465e27a0b466bb7a9876d1bff0a74 is 50, key is test_row_0/C:col10/1733480229915/Put/seqid=0 2024-12-06T10:17:10,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741968_1144 (size=12301) 2024-12-06T10:17:10,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=613 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/adc465e27a0b466bb7a9876d1bff0a74 2024-12-06T10:17:10,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/081e68a661bb470d8f7c20da9301e560 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/081e68a661bb470d8f7c20da9301e560 2024-12-06T10:17:10,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/081e68a661bb470d8f7c20da9301e560, entries=200, sequenceid=613, filesize=14.4 K 2024-12-06T10:17:10,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/4a913af414a74ab69b2300a2e92733f3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4a913af414a74ab69b2300a2e92733f3 2024-12-06T10:17:10,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4a913af414a74ab69b2300a2e92733f3, entries=150, sequenceid=613, filesize=12.0 K 2024-12-06T10:17:10,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/adc465e27a0b466bb7a9876d1bff0a74 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/adc465e27a0b466bb7a9876d1bff0a74 2024-12-06T10:17:10,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/adc465e27a0b466bb7a9876d1bff0a74, entries=150, sequenceid=613, filesize=12.0 K 2024-12-06T10:17:10,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for b58170106b3730174deb9625aeac23df in 222ms, sequenceid=613, compaction requested=true 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:10,139 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:10,139 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:10,141 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:10,141 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:10,141 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
2024-12-06T10:17:10,141 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/edf460481040493c9309874c3f4fe556, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0e205bb780604619b07cb58e74251afa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/428a296ec1374ff4b902c32736090686, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/081e68a661bb470d8f7c20da9301e560] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=56.5 K 2024-12-06T10:17:10,141 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:17:10,141 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-06T10:17:10,142 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. because compaction request was cancelled 2024-12-06T10:17:10,142 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:10,142 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-06T10:17:10,142 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting edf460481040493c9309874c3f4fe556, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1733480229123 2024-12-06T10:17:10,142 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e205bb780604619b07cb58e74251afa, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733480229459 2024-12-06T10:17:10,143 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:17:10,143 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-12-06T10:17:10,143 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 428a296ec1374ff4b902c32736090686, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=591, earliestPutTs=1733480229611 2024-12-06T10:17:10,143 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. because compaction request was cancelled 2024-12-06T10:17:10,143 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:10,145 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 081e68a661bb470d8f7c20da9301e560, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=613, earliestPutTs=1733480229797 2024-12-06T10:17:10,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-06T10:17:10,161 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-06T10:17:10,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:10,162 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T10:17:10,163 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#130 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:10,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:10,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:10,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:10,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:10,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:10,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:10,163 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/88a8d849a0664e8ea8229a64dc4304fc is 50, key is test_row_0/A:col10/1733480229915/Put/seqid=0 2024-12-06T10:17:10,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3a4817fcbfeb435b840bb223e194b5fd is 50, key is test_row_0/A:col10/1733480229941/Put/seqid=0 2024-12-06T10:17:10,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741969_1145 (size=13765) 2024-12-06T10:17:10,226 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/88a8d849a0664e8ea8229a64dc4304fc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/88a8d849a0664e8ea8229a64dc4304fc 2024-12-06T10:17:10,234 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 88a8d849a0664e8ea8229a64dc4304fc(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:10,234 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:10,234 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=12, startTime=1733480230139; duration=0sec 2024-12-06T10:17:10,234 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:10,234 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:10,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741970_1146 (size=12301) 2024-12-06T10:17:10,240 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=630 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3a4817fcbfeb435b840bb223e194b5fd 2024-12-06T10:17:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/e5286817026545c2a8642d8d1e1aeb74 is 50, key is test_row_0/B:col10/1733480229941/Put/seqid=0 2024-12-06T10:17:10,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741971_1147 (size=12301) 2024-12-06T10:17:10,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on b58170106b3730174deb9625aeac23df 2024-12-06T10:17:10,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
as already flushing 2024-12-06T10:17:10,260 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=630 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/e5286817026545c2a8642d8d1e1aeb74 2024-12-06T10:17:10,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8ca6d147d2b847fb950ade2187b2ed88 is 50, key is test_row_0/C:col10/1733480229941/Put/seqid=0 2024-12-06T10:17:10,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741972_1148 (size=12301) 2024-12-06T10:17:10,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480290286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 324 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480290288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480290389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 326 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480290389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-06T10:17:10,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51554 deadline: 1733480290496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,499 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:10,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51552 deadline: 1733480290507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,508 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:10,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51570 deadline: 1733480290515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,516 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:10,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51536 deadline: 1733480290592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:10,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 328 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51526 deadline: 1733480290593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:10,682 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=630 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8ca6d147d2b847fb950ade2187b2ed88 2024-12-06T10:17:10,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3a4817fcbfeb435b840bb223e194b5fd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3a4817fcbfeb435b840bb223e194b5fd 2024-12-06T10:17:10,693 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3a4817fcbfeb435b840bb223e194b5fd, entries=150, sequenceid=630, filesize=12.0 K 2024-12-06T10:17:10,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/e5286817026545c2a8642d8d1e1aeb74 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e5286817026545c2a8642d8d1e1aeb74 2024-12-06T10:17:10,701 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e5286817026545c2a8642d8d1e1aeb74, entries=150, sequenceid=630, filesize=12.0 K 2024-12-06T10:17:10,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/8ca6d147d2b847fb950ade2187b2ed88 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8ca6d147d2b847fb950ade2187b2ed88 2024-12-06T10:17:10,708 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8ca6d147d2b847fb950ade2187b2ed88, entries=150, sequenceid=630, filesize=12.0 K 2024-12-06T10:17:10,709 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for b58170106b3730174deb9625aeac23df in 547ms, sequenceid=630, compaction requested=true 2024-12-06T10:17:10,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:10,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:10,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-12-06T10:17:10,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-12-06T10:17:10,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-06T10:17:10,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 858 msec 2024-12-06T10:17:10,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 863 msec 2024-12-06T10:17:10,759 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c3b736e to 127.0.0.1:61610 2024-12-06T10:17:10,759 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b5cad1a to 127.0.0.1:61610 2024-12-06T10:17:10,760 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:10,760 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x767a8485 to 127.0.0.1:61610 2024-12-06T10:17:10,760 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:10,760 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:10,763 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6502d571 to 127.0.0.1:61610 2024-12-06T10:17:10,763 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 
b58170106b3730174deb9625aeac23df 2024-12-06T10:17:10,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:17:10,899 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x736f1673 to 127.0.0.1:61610 2024-12-06T10:17:10,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:10,899 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:10,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:10,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:10,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:10,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:10,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:10,901 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f34ff67 to 127.0.0.1:61610 2024-12-06T10:17:10,901 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:10,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/b2616d77b12845a8b6f05754af64d28f is 50, key is test_row_0/A:col10/1733480230285/Put/seqid=0 2024-12-06T10:17:10,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741973_1149 (size=12301) 2024-12-06T10:17:10,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-06T10:17:10,957 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-06T10:17:11,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=654 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/b2616d77b12845a8b6f05754af64d28f 2024-12-06T10:17:11,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/708aa6d11e2c407692610ff7b4f050a7 is 50, key is test_row_0/B:col10/1733480230285/Put/seqid=0 2024-12-06T10:17:11,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741974_1150 (size=12301) 2024-12-06T10:17:11,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=654 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/708aa6d11e2c407692610ff7b4f050a7 2024-12-06T10:17:11,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e6f58ce9ea1248df8f653950802d3cd4 is 50, key is test_row_0/C:col10/1733480230285/Put/seqid=0 2024-12-06T10:17:11,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741975_1151 (size=12301) 2024-12-06T10:17:12,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=654 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e6f58ce9ea1248df8f653950802d3cd4 2024-12-06T10:17:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/b2616d77b12845a8b6f05754af64d28f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b2616d77b12845a8b6f05754af64d28f 2024-12-06T10:17:12,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b2616d77b12845a8b6f05754af64d28f, entries=150, sequenceid=654, filesize=12.0 K 2024-12-06T10:17:12,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/708aa6d11e2c407692610ff7b4f050a7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/708aa6d11e2c407692610ff7b4f050a7 2024-12-06T10:17:12,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/708aa6d11e2c407692610ff7b4f050a7, entries=150, sequenceid=654, filesize=12.0 K 2024-12-06T10:17:12,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/e6f58ce9ea1248df8f653950802d3cd4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e6f58ce9ea1248df8f653950802d3cd4 2024-12-06T10:17:12,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e6f58ce9ea1248df8f653950802d3cd4, entries=150, sequenceid=654, filesize=12.0 K 2024-12-06T10:17:12,154 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=6.71 KB/6870 for b58170106b3730174deb9625aeac23df in 1255ms, sequenceid=654, compaction requested=true 2024-12-06T10:17:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:12,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:12,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:12,155 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:12,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:12,155 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:12,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b58170106b3730174deb9625aeac23df:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:12,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:12,156 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:12,156 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/A is initiating minor compaction (all files) 2024-12-06T10:17:12,156 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/A in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
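The RegionTooBusyException entries above are the region server refusing writes while region b58170106b3730174deb9625aeac23df is over its memstore blocking limit (512.0 K in this run), with RpcRetryingCallerImpl on the client retrying the same put (tries=7 of retries=16) until the flush relieves the pressure. Below is a minimal client-side sketch of such a write, assuming the standard HBase 2.x client API; the retry and pause values are illustrative guesses, not read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs; the log above shows tries=7 of retries=16,
    // so the retry budget was still being worked through. Values here are
    // illustrative only.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // ms between attempts

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region is over its memstore blocking limit, the server throws
      // RegionTooBusyException and the client retries until it succeeds or the
      // retry budget / operation timeout is exhausted.
      table.put(put);
    }
  }
}

The deadline values in the CallRunner entries correspond to the per-attempt RPC deadline the client attaches to each retry; the retries keep the writer threads inside that budget while the flush above drains the memstore.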
2024-12-06T10:17:12,156 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:12,156 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/88a8d849a0664e8ea8229a64dc4304fc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3a4817fcbfeb435b840bb223e194b5fd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b2616d77b12845a8b6f05754af64d28f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=37.5 K 2024-12-06T10:17:12,156 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/B is initiating minor compaction (all files) 2024-12-06T10:17:12,156 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/B in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:12,156 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/33c1e9e6a0674489a8b4a0dcde53c659, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4a913af414a74ab69b2300a2e92733f3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e5286817026545c2a8642d8d1e1aeb74, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/708aa6d11e2c407692610ff7b4f050a7] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.4 K 2024-12-06T10:17:12,156 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88a8d849a0664e8ea8229a64dc4304fc, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=613, earliestPutTs=1733480229797 2024-12-06T10:17:12,157 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 33c1e9e6a0674489a8b4a0dcde53c659, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=591, earliestPutTs=1733480229611 2024-12-06T10:17:12,157 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a913af414a74ab69b2300a2e92733f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=613, earliestPutTs=1733480229797 2024-12-06T10:17:12,157 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a4817fcbfeb435b840bb223e194b5fd, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=630, earliestPutTs=1733480229931 2024-12-06T10:17:12,157 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2616d77b12845a8b6f05754af64d28f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=654, earliestPutTs=1733480230285 2024-12-06T10:17:12,157 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e5286817026545c2a8642d8d1e1aeb74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=630, earliestPutTs=1733480229931 2024-12-06T10:17:12,158 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 708aa6d11e2c407692610ff7b4f050a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=654, earliestPutTs=1733480230285 2024-12-06T10:17:12,164 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#A#compaction#137 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:12,165 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3b6bd4d2027342b4b14162488638ec83 is 50, key is test_row_0/A:col10/1733480230285/Put/seqid=0 2024-12-06T10:17:12,167 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#B#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:12,167 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/6cca589e8edb4e1896ab3d99c3af220e is 50, key is test_row_0/B:col10/1733480230285/Put/seqid=0 2024-12-06T10:17:12,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741976_1152 (size=13867) 2024-12-06T10:17:12,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741977_1153 (size=13867) 2024-12-06T10:17:12,575 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/3b6bd4d2027342b4b14162488638ec83 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3b6bd4d2027342b4b14162488638ec83 2024-12-06T10:17:12,576 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/6cca589e8edb4e1896ab3d99c3af220e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6cca589e8edb4e1896ab3d99c3af220e 2024-12-06T10:17:12,580 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b58170106b3730174deb9625aeac23df/A of b58170106b3730174deb9625aeac23df into 3b6bd4d2027342b4b14162488638ec83(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:12,580 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/B of b58170106b3730174deb9625aeac23df into 6cca589e8edb4e1896ab3d99c3af220e(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:12,580 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:12,580 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/A, priority=13, startTime=1733480232154; duration=0sec 2024-12-06T10:17:12,580 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:12,581 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/B, priority=12, startTime=1733480232155; duration=0sec 2024-12-06T10:17:12,581 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:12,581 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:A 2024-12-06T10:17:12,581 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:12,581 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:12,581 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:B 2024-12-06T10:17:12,582 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50600 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:12,582 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): b58170106b3730174deb9625aeac23df/C is initiating minor compaction (all files) 2024-12-06T10:17:12,582 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b58170106b3730174deb9625aeac23df/C in TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 
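The flush and compaction activity in this stretch of the log is driven by region-server thresholds: a store's memstore is flushed once it reaches hbase.hregion.memstore.flush.size, writes are blocked (the RegionTooBusyException above) once the region reaches that size multiplied by hbase.hregion.memstore.block.multiplier, and a store becomes eligible for minor compaction once it holds at least hbase.hstore.compactionThreshold files. The sketch below assumes this test shrinks the flush size to roughly 128 KB so that 128 KB x 4 gives the 512.0 K blocking limit seen above; the values are guesses for illustration, not taken from the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a store's memstore at 128 KB (illustrative; the production default
    // is 128 MB). Writes are rejected with RegionTooBusyException once the
    // region's memstore reaches flush.size * block.multiplier, which with these
    // guessed values is 128 KB * 4 = 512 KB, matching the
    // "Over memstore limit=512.0 K" messages above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Minor compaction becomes eligible once a store has at least this many
    // HFiles; the log shows selections of 3 and 4 files per store.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    System.out.println("memstore flush size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", -1) + " bytes");
  }
}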
2024-12-06T10:17:12,582 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/246642f604cb44378303c9609f3c2d8a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/adc465e27a0b466bb7a9876d1bff0a74, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8ca6d147d2b847fb950ade2187b2ed88, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e6f58ce9ea1248df8f653950802d3cd4] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp, totalSize=49.4 K 2024-12-06T10:17:12,583 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 246642f604cb44378303c9609f3c2d8a, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=591, earliestPutTs=1733480229611 2024-12-06T10:17:12,583 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting adc465e27a0b466bb7a9876d1bff0a74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=613, earliestPutTs=1733480229797 2024-12-06T10:17:12,583 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ca6d147d2b847fb950ade2187b2ed88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=630, earliestPutTs=1733480229931 2024-12-06T10:17:12,584 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6f58ce9ea1248df8f653950802d3cd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=654, earliestPutTs=1733480230285 2024-12-06T10:17:12,592 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b58170106b3730174deb9625aeac23df#C#compaction#139 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:12,593 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/28010728ada1448d82cf9532af8585d9 is 50, key is test_row_0/C:col10/1733480230285/Put/seqid=0 2024-12-06T10:17:12,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741978_1154 (size=13833) 2024-12-06T10:17:13,002 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/28010728ada1448d82cf9532af8585d9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/28010728ada1448d82cf9532af8585d9 2024-12-06T10:17:13,007 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b58170106b3730174deb9625aeac23df/C of b58170106b3730174deb9625aeac23df into 28010728ada1448d82cf9532af8585d9(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:13,007 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:13,007 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df., storeName=b58170106b3730174deb9625aeac23df/C, priority=12, startTime=1733480232155; duration=0sec 2024-12-06T10:17:13,008 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:13,008 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b58170106b3730174deb9625aeac23df:C 2024-12-06T10:17:13,055 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-06T10:17:19,813 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-06T10:17:19,815 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-06T10:17:20,528 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d38d10 to 127.0.0.1:61610
2024-12-06T10:17:20,528 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:17:20,534 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ee2166f to 127.0.0.1:61610
2024-12-06T10:17:20,534 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:17:20,610 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c63ae4e to 127.0.0.1:61610
2024-12-06T10:17:20,610 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:17:20,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-12-06T10:17:20,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59
2024-12-06T10:17:20,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 170
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 173
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5796
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5599
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2509
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7527 rows
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2498
2024-12-06T10:17:20,611 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7490 rows
2024-12-06T10:17:20,611 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-06T10:17:20,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fdf5682 to 127.0.0.1:61610
2024-12-06T10:17:20,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:17:20,615 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-06T10:17:20,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-06T10:17:20,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-06T10:17:20,628 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480240628"}]},"ts":"1733480240628"}
2024-12-06T10:17:20,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34
2024-12-06T10:17:20,629 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-06T10:17:20,632 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-06T10:17:20,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-06T10:17:20,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, UNASSIGN}]
2024-12-06T10:17:20,639 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, UNASSIGN
2024-12-06T10:17:20,640 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=b58170106b3730174deb9625aeac23df, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743
2024-12-06T10:17:20,641 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-06T10:17:20,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743}]
2024-12-06T10:17:20,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34
2024-12-06T10:17:20,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:20,798 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close b58170106b3730174deb9625aeac23df
2024-12-06T10:17:20,798 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing b58170106b3730174deb9625aeac23df, disabling compactions & flushes
2024-12-06T10:17:20,799 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.
2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. after waiting 0 ms 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:20,799 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing b58170106b3730174deb9625aeac23df 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=A 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=B 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b58170106b3730174deb9625aeac23df, store=C 2024-12-06T10:17:20,799 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:20,804 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fa9906c3e7e741649718eb93d19fd15a is 50, key is test_row_0/A:col10/1733480240609/Put/seqid=0 2024-12-06T10:17:20,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741979_1155 (size=7415) 2024-12-06T10:17:20,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-06T10:17:21,209 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fa9906c3e7e741649718eb93d19fd15a 2024-12-06T10:17:21,217 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/35057a9a73514bccba32a571f4bb866f is 50, 
key is test_row_0/B:col10/1733480240609/Put/seqid=0 2024-12-06T10:17:21,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741980_1156 (size=7415) 2024-12-06T10:17:21,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-06T10:17:21,622 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/35057a9a73514bccba32a571f4bb866f 2024-12-06T10:17:21,631 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5fdd41f139d54516a20ea9a2d37044fb is 50, key is test_row_0/C:col10/1733480240609/Put/seqid=0 2024-12-06T10:17:21,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741981_1157 (size=7415) 2024-12-06T10:17:21,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-06T10:17:22,036 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5fdd41f139d54516a20ea9a2d37044fb 2024-12-06T10:17:22,041 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/A/fa9906c3e7e741649718eb93d19fd15a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa9906c3e7e741649718eb93d19fd15a 2024-12-06T10:17:22,045 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa9906c3e7e741649718eb93d19fd15a, entries=50, sequenceid=664, filesize=7.2 K 2024-12-06T10:17:22,046 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/B/35057a9a73514bccba32a571f4bb866f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35057a9a73514bccba32a571f4bb866f 2024-12-06T10:17:22,050 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35057a9a73514bccba32a571f4bb866f, entries=50, sequenceid=664, filesize=7.2 K 2024-12-06T10:17:22,051 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/.tmp/C/5fdd41f139d54516a20ea9a2d37044fb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5fdd41f139d54516a20ea9a2d37044fb 2024-12-06T10:17:22,055 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5fdd41f139d54516a20ea9a2d37044fb, entries=50, sequenceid=664, filesize=7.2 K 2024-12-06T10:17:22,056 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for b58170106b3730174deb9625aeac23df in 1257ms, sequenceid=664, compaction requested=false 2024-12-06T10:17:22,056 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1231fe29209a426188f997b44fd54d78, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/e853622d9a2a4fd785b1d3f1b013a4dc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fb3cfed1a8dc4b6699125f8bb94ccc2f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c83210370f75493ea2bcbe5fa160cf0a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/45917a5bc9dd488db25c8d4d4769a9f5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0fa2abd1dd764753b86c8980f3a423fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/d44069da13fd43bf84288f031064c82c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c20b4e29514d40cbbfbde11c803d5472, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/13c7b26f1c4d4c39b8ebda1b80b2c05e, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/46a89faee0ab42e3a7fd35d499c0ec83, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/14238d47ee5a4553959923f9040cc238, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/da3e59c7dee24d62ac5b2e0b43047335, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/7f1ae794621149efbc464f6c7e29b5f7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/901738c6447346059385ada39f9e9a90, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0342b1d298d0456580b390e739ff80a9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6022b549f2454721afd9204e1adae00f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6db240beb05749b7905d387c074d6d66, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/bc0e1caccf7b49afa8ad2a743616c193, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/32362215a25f4a30a943f96d2556b778, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9f93097ac9394f4e83ca0f0c2c8de661, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/cb088467176847109c0605b1d39d469c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/857ff39530824cd78ad83926b85d6fb2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/03d05db48d62407fa1967e561b01dd71, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/265cdb6536054b4da97d5ca72c549230, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/ed8e44593bb74a74aa8f8992a6cff723, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/512a0fd606244571a39b85a30fd299b4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa0dfd3ce8914714ad27e99972d327be, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b29606ead712487fb468bc685526f5bb, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/71c720d63d934fa5ba59419363e07690, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6d08b33d1714482c9a78c3bf04d865f8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9ccaa35752cc44c88e3a6cf73b915d4c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/618fa0af1291438ca45b7b0201ca03a8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0d9c3744b3824f9f8c431a6525b635b9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/8ed94023b79d4db2b3f57a5574d97ec5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9131a8dbfafd4ac3a779bfd512f9658a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1812227c7c0148668e76bdd9f0c7466a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/29126e5084024306a3c2df5317f369b2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/52c690c4c6ba44d2b4eee59eb4ada720, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/edf460481040493c9309874c3f4fe556, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3de419a86c8044199977e32e0329576a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0e205bb780604619b07cb58e74251afa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/428a296ec1374ff4b902c32736090686, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/081e68a661bb470d8f7c20da9301e560, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/88a8d849a0664e8ea8229a64dc4304fc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3a4817fcbfeb435b840bb223e194b5fd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b2616d77b12845a8b6f05754af64d28f] to archive 2024-12-06T10:17:22,060 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T10:17:22,065 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1231fe29209a426188f997b44fd54d78 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1231fe29209a426188f997b44fd54d78 2024-12-06T10:17:22,067 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/e853622d9a2a4fd785b1d3f1b013a4dc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/e853622d9a2a4fd785b1d3f1b013a4dc 2024-12-06T10:17:22,068 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fb3cfed1a8dc4b6699125f8bb94ccc2f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fb3cfed1a8dc4b6699125f8bb94ccc2f 2024-12-06T10:17:22,069 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c83210370f75493ea2bcbe5fa160cf0a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c83210370f75493ea2bcbe5fa160cf0a 2024-12-06T10:17:22,070 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/45917a5bc9dd488db25c8d4d4769a9f5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/45917a5bc9dd488db25c8d4d4769a9f5 2024-12-06T10:17:22,072 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0fa2abd1dd764753b86c8980f3a423fa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0fa2abd1dd764753b86c8980f3a423fa 2024-12-06T10:17:22,073 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/d44069da13fd43bf84288f031064c82c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/d44069da13fd43bf84288f031064c82c 2024-12-06T10:17:22,074 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c20b4e29514d40cbbfbde11c803d5472 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/c20b4e29514d40cbbfbde11c803d5472 2024-12-06T10:17:22,075 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/13c7b26f1c4d4c39b8ebda1b80b2c05e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/13c7b26f1c4d4c39b8ebda1b80b2c05e 2024-12-06T10:17:22,077 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/46a89faee0ab42e3a7fd35d499c0ec83 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/46a89faee0ab42e3a7fd35d499c0ec83 2024-12-06T10:17:22,078 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/14238d47ee5a4553959923f9040cc238 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/14238d47ee5a4553959923f9040cc238 2024-12-06T10:17:22,079 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/da3e59c7dee24d62ac5b2e0b43047335 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/da3e59c7dee24d62ac5b2e0b43047335 2024-12-06T10:17:22,080 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/7f1ae794621149efbc464f6c7e29b5f7 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/7f1ae794621149efbc464f6c7e29b5f7 2024-12-06T10:17:22,081 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/901738c6447346059385ada39f9e9a90 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/901738c6447346059385ada39f9e9a90 2024-12-06T10:17:22,083 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0342b1d298d0456580b390e739ff80a9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0342b1d298d0456580b390e739ff80a9 2024-12-06T10:17:22,084 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6022b549f2454721afd9204e1adae00f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6022b549f2454721afd9204e1adae00f 2024-12-06T10:17:22,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6db240beb05749b7905d387c074d6d66 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6db240beb05749b7905d387c074d6d66 2024-12-06T10:17:22,086 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/bc0e1caccf7b49afa8ad2a743616c193 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/bc0e1caccf7b49afa8ad2a743616c193 2024-12-06T10:17:22,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/32362215a25f4a30a943f96d2556b778 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/32362215a25f4a30a943f96d2556b778 2024-12-06T10:17:22,089 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9f93097ac9394f4e83ca0f0c2c8de661 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9f93097ac9394f4e83ca0f0c2c8de661 2024-12-06T10:17:22,090 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/cb088467176847109c0605b1d39d469c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/cb088467176847109c0605b1d39d469c 2024-12-06T10:17:22,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/857ff39530824cd78ad83926b85d6fb2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/857ff39530824cd78ad83926b85d6fb2 2024-12-06T10:17:22,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/03d05db48d62407fa1967e561b01dd71 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/03d05db48d62407fa1967e561b01dd71 2024-12-06T10:17:22,094 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/265cdb6536054b4da97d5ca72c549230 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/265cdb6536054b4da97d5ca72c549230 2024-12-06T10:17:22,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/ed8e44593bb74a74aa8f8992a6cff723 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/ed8e44593bb74a74aa8f8992a6cff723 2024-12-06T10:17:22,096 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/512a0fd606244571a39b85a30fd299b4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/512a0fd606244571a39b85a30fd299b4 2024-12-06T10:17:22,098 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa0dfd3ce8914714ad27e99972d327be to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa0dfd3ce8914714ad27e99972d327be 2024-12-06T10:17:22,099 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b29606ead712487fb468bc685526f5bb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b29606ead712487fb468bc685526f5bb 2024-12-06T10:17:22,100 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/71c720d63d934fa5ba59419363e07690 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/71c720d63d934fa5ba59419363e07690 2024-12-06T10:17:22,101 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6d08b33d1714482c9a78c3bf04d865f8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/6d08b33d1714482c9a78c3bf04d865f8 2024-12-06T10:17:22,103 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9ccaa35752cc44c88e3a6cf73b915d4c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9ccaa35752cc44c88e3a6cf73b915d4c 2024-12-06T10:17:22,104 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/618fa0af1291438ca45b7b0201ca03a8 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/618fa0af1291438ca45b7b0201ca03a8 2024-12-06T10:17:22,105 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0d9c3744b3824f9f8c431a6525b635b9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0d9c3744b3824f9f8c431a6525b635b9 2024-12-06T10:17:22,106 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/8ed94023b79d4db2b3f57a5574d97ec5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/8ed94023b79d4db2b3f57a5574d97ec5 2024-12-06T10:17:22,108 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9131a8dbfafd4ac3a779bfd512f9658a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/9131a8dbfafd4ac3a779bfd512f9658a 2024-12-06T10:17:22,109 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1812227c7c0148668e76bdd9f0c7466a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/1812227c7c0148668e76bdd9f0c7466a 2024-12-06T10:17:22,111 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/29126e5084024306a3c2df5317f369b2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/29126e5084024306a3c2df5317f369b2 2024-12-06T10:17:22,112 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/52c690c4c6ba44d2b4eee59eb4ada720 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/52c690c4c6ba44d2b4eee59eb4ada720 2024-12-06T10:17:22,114 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/edf460481040493c9309874c3f4fe556 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/edf460481040493c9309874c3f4fe556 2024-12-06T10:17:22,115 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3de419a86c8044199977e32e0329576a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3de419a86c8044199977e32e0329576a 2024-12-06T10:17:22,117 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0e205bb780604619b07cb58e74251afa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/0e205bb780604619b07cb58e74251afa 2024-12-06T10:17:22,118 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/428a296ec1374ff4b902c32736090686 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/428a296ec1374ff4b902c32736090686 2024-12-06T10:17:22,119 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/081e68a661bb470d8f7c20da9301e560 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/081e68a661bb470d8f7c20da9301e560 2024-12-06T10:17:22,120 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/88a8d849a0664e8ea8229a64dc4304fc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/88a8d849a0664e8ea8229a64dc4304fc 2024-12-06T10:17:22,121 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3a4817fcbfeb435b840bb223e194b5fd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3a4817fcbfeb435b840bb223e194b5fd 2024-12-06T10:17:22,123 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b2616d77b12845a8b6f05754af64d28f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/b2616d77b12845a8b6f05754af64d28f 2024-12-06T10:17:22,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fd118e428104484ea0db3da9294162d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76936e39d6634f87b15c3dae405e3873, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/590dcd3497664918aeef83bf8b7086eb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6140b90b5d5341be9ea2131a7ccf3848, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4c74bf0f893b48b381eea9651b1fde7a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/9d7e4d043abe482f91f20f6ed1f3dc86, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d4558731ae29441b98c9af94346c0a30, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/44191a3bc0ce41f98b436888b6d1ef4a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e2d9d9b845eb4e268e0e22b20edd1a8d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/67678503e1dd4dc7be58d3a3eec00817, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f59976c62129486982cf5127381d7e8e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35988e34183c474c9b7a7604d14c22a4, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fe567e62b4a5471a98ca002af5cf538a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d7115ea069a04e28a8a3e85c3c9b8d5d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/bdc576705f1845c4afb4f0ab9c3ece49, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2203c0b90d6a451586516dcf8ce40eca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/250670863cb64b3db3b22dba6ac0e464, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/98ef89a372d64938a0ac4fbd1a069481, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2f8f489fc12e476fbc2ff7ad4184cb69, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b01a7a033c9b43818214700631a18104, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cb41f0ef1ffb4a28b396a7297574694e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/978ef75d9cb54cff9129245634d6dc17, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/656296dee1b241ffbc152db35de344ce, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/86b5691360ff4303bf0720dfe8e618e7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/072a8e0596e044fe81a5bddc9ddcb1e3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/ab3d4085edbe4e8faf8427763a14a4a8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/de58abee26c5402a80237519b3e00e40, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/516c9d5363fe49e88807697ae8097a79, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b90ebb3dc8df4af58e1aee44dabac07a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/40862726ae484cdfa9378e2dadf82747, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f3abf245f5734cf2bb04569c827d32a9, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/078bf0d48a234dbfab0418619e0b7b26, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cee88b2fe49946df96ca7732a8d48b80, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/abf63d44b7a94ea6a820ecce97a33da9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0749e4107b9d452c8cd2e0c157be3de3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/45445a5985024dc7843c274e7fe253ab, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/768901de0e87453ca5fa60deb010d080, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/04970250b60b400b88c1e9121a5fa8a9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/367f1f8fac5446ae9fa26bbefc092e50, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/7a18f27548734de6979e8d990f1bec0f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/33c1e9e6a0674489a8b4a0dcde53c659, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0105d4378c384f0083e7e65fe21a76df, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4a913af414a74ab69b2300a2e92733f3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e5286817026545c2a8642d8d1e1aeb74, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/708aa6d11e2c407692610ff7b4f050a7] to archive 2024-12-06T10:17:22,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
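[editor's note, for orientation only] The HStore(2316)/HFileArchiver entries above and below follow one pattern: each compacted store file under <root>/data/default/TestAcidGuarantees/<region>/<family>/<file> is moved to the mirrored location <root>/archive/data/default/TestAcidGuarantees/<region>/<family>/<file>. The Java sketch below only illustrates that src-to-dst mapping under the assumed root hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4; it is not the actual HFileArchiver implementation, and the class/method names are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;

    // Illustrative sketch only -- not HBase's HFileArchiver code.
    public final class ArchivePathSketch {

      // Re-root a store-file path under "<root>/archive/", mirroring what the log shows:
      //   <root>/data/default/<table>/<region>/<family>/<file>
      //   -> <root>/archive/data/default/<table>/<region>/<family>/<file>
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();              // e.g. /user/jenkins/test-data/<id>
        String file = storeFile.toUri().getPath();            // e.g. /user/jenkins/test-data/<id>/data/default/...
        String relative = file.substring(root.length() + 1);  // data/default/<table>/<region>/<family>/<file>
        return new Path(new Path(rootDir, "archive"), relative);
      }

      // Move one compacted store file into the archive, creating the target directory first.
      static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path dst = toArchivePath(rootDir, storeFile);
        fs.mkdirs(dst.getParent());
        if (!fs.rename(storeFile, dst)) {
          throw new IOException("Failed to archive " + storeFile + " to " + dst);
        }
      }

      public static void main(String[] args) throws IOException {
        // Hypothetical usage against the test cluster's root directory (assumed values).
        Configuration conf = new Configuration();
        Path root = new Path("hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4");
        Path storeFile = new Path(root, "data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3");
        archive(FileSystem.get(root.toUri(), conf), root, storeFile);
      }
    }

Each per-file DEBUG line that follows ("Archived from FileableStoreFile, <src> to <dst>") is consistent with one such move per store file.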
2024-12-06T10:17:22,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76f1f07f7cdb4edd8f1b105cb20b08e3 2024-12-06T10:17:22,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fd118e428104484ea0db3da9294162d9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fd118e428104484ea0db3da9294162d9 2024-12-06T10:17:22,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76936e39d6634f87b15c3dae405e3873 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/76936e39d6634f87b15c3dae405e3873 2024-12-06T10:17:22,148 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/590dcd3497664918aeef83bf8b7086eb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/590dcd3497664918aeef83bf8b7086eb 2024-12-06T10:17:22,150 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6140b90b5d5341be9ea2131a7ccf3848 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6140b90b5d5341be9ea2131a7ccf3848 2024-12-06T10:17:22,151 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4c74bf0f893b48b381eea9651b1fde7a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4c74bf0f893b48b381eea9651b1fde7a 2024-12-06T10:17:22,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/9d7e4d043abe482f91f20f6ed1f3dc86 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/9d7e4d043abe482f91f20f6ed1f3dc86 2024-12-06T10:17:22,153 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d4558731ae29441b98c9af94346c0a30 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d4558731ae29441b98c9af94346c0a30 2024-12-06T10:17:22,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/44191a3bc0ce41f98b436888b6d1ef4a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/44191a3bc0ce41f98b436888b6d1ef4a 2024-12-06T10:17:22,156 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e2d9d9b845eb4e268e0e22b20edd1a8d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e2d9d9b845eb4e268e0e22b20edd1a8d 2024-12-06T10:17:22,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/67678503e1dd4dc7be58d3a3eec00817 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/67678503e1dd4dc7be58d3a3eec00817 2024-12-06T10:17:22,158 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f59976c62129486982cf5127381d7e8e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f59976c62129486982cf5127381d7e8e 2024-12-06T10:17:22,159 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35988e34183c474c9b7a7604d14c22a4 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35988e34183c474c9b7a7604d14c22a4 2024-12-06T10:17:22,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fe567e62b4a5471a98ca002af5cf538a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/fe567e62b4a5471a98ca002af5cf538a 2024-12-06T10:17:22,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d7115ea069a04e28a8a3e85c3c9b8d5d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/d7115ea069a04e28a8a3e85c3c9b8d5d 2024-12-06T10:17:22,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/bdc576705f1845c4afb4f0ab9c3ece49 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/bdc576705f1845c4afb4f0ab9c3ece49 2024-12-06T10:17:22,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2203c0b90d6a451586516dcf8ce40eca to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2203c0b90d6a451586516dcf8ce40eca 2024-12-06T10:17:22,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/250670863cb64b3db3b22dba6ac0e464 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/250670863cb64b3db3b22dba6ac0e464 2024-12-06T10:17:22,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/98ef89a372d64938a0ac4fbd1a069481 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/98ef89a372d64938a0ac4fbd1a069481 2024-12-06T10:17:22,167 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2f8f489fc12e476fbc2ff7ad4184cb69 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/2f8f489fc12e476fbc2ff7ad4184cb69 2024-12-06T10:17:22,169 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b01a7a033c9b43818214700631a18104 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b01a7a033c9b43818214700631a18104 2024-12-06T10:17:22,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cb41f0ef1ffb4a28b396a7297574694e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cb41f0ef1ffb4a28b396a7297574694e 2024-12-06T10:17:22,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/978ef75d9cb54cff9129245634d6dc17 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/978ef75d9cb54cff9129245634d6dc17 2024-12-06T10:17:22,173 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/656296dee1b241ffbc152db35de344ce to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/656296dee1b241ffbc152db35de344ce 2024-12-06T10:17:22,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/86b5691360ff4303bf0720dfe8e618e7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/86b5691360ff4303bf0720dfe8e618e7 2024-12-06T10:17:22,175 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/072a8e0596e044fe81a5bddc9ddcb1e3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/072a8e0596e044fe81a5bddc9ddcb1e3 2024-12-06T10:17:22,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/ab3d4085edbe4e8faf8427763a14a4a8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/ab3d4085edbe4e8faf8427763a14a4a8 2024-12-06T10:17:22,178 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/de58abee26c5402a80237519b3e00e40 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/de58abee26c5402a80237519b3e00e40 2024-12-06T10:17:22,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/516c9d5363fe49e88807697ae8097a79 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/516c9d5363fe49e88807697ae8097a79 2024-12-06T10:17:22,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b90ebb3dc8df4af58e1aee44dabac07a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/b90ebb3dc8df4af58e1aee44dabac07a 2024-12-06T10:17:22,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/40862726ae484cdfa9378e2dadf82747 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/40862726ae484cdfa9378e2dadf82747 2024-12-06T10:17:22,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f3abf245f5734cf2bb04569c827d32a9 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/f3abf245f5734cf2bb04569c827d32a9 2024-12-06T10:17:22,185 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/078bf0d48a234dbfab0418619e0b7b26 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/078bf0d48a234dbfab0418619e0b7b26 2024-12-06T10:17:22,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cee88b2fe49946df96ca7732a8d48b80 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/cee88b2fe49946df96ca7732a8d48b80 2024-12-06T10:17:22,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/abf63d44b7a94ea6a820ecce97a33da9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/abf63d44b7a94ea6a820ecce97a33da9 2024-12-06T10:17:22,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0749e4107b9d452c8cd2e0c157be3de3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0749e4107b9d452c8cd2e0c157be3de3 2024-12-06T10:17:22,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/45445a5985024dc7843c274e7fe253ab to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/45445a5985024dc7843c274e7fe253ab 2024-12-06T10:17:22,192 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/768901de0e87453ca5fa60deb010d080 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/768901de0e87453ca5fa60deb010d080 2024-12-06T10:17:22,193 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/04970250b60b400b88c1e9121a5fa8a9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/04970250b60b400b88c1e9121a5fa8a9 2024-12-06T10:17:22,194 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/367f1f8fac5446ae9fa26bbefc092e50 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/367f1f8fac5446ae9fa26bbefc092e50 2024-12-06T10:17:22,195 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/7a18f27548734de6979e8d990f1bec0f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/7a18f27548734de6979e8d990f1bec0f 2024-12-06T10:17:22,196 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/33c1e9e6a0674489a8b4a0dcde53c659 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/33c1e9e6a0674489a8b4a0dcde53c659 2024-12-06T10:17:22,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0105d4378c384f0083e7e65fe21a76df to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/0105d4378c384f0083e7e65fe21a76df 2024-12-06T10:17:22,198 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4a913af414a74ab69b2300a2e92733f3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/4a913af414a74ab69b2300a2e92733f3 2024-12-06T10:17:22,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e5286817026545c2a8642d8d1e1aeb74 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/e5286817026545c2a8642d8d1e1aeb74 2024-12-06T10:17:22,201 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/708aa6d11e2c407692610ff7b4f050a7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/708aa6d11e2c407692610ff7b4f050a7 2024-12-06T10:17:22,202 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d609142fb37b4be78d6049eab52474d4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/edd3d87ad0274960b0e41ac2022ecc87, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/dfabba2d3f4741c09080e57da54f3a1e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/442b24b635cd4d75b3bd4180ba7497e6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3f61562fe0a145a7b909bbc0047ef1c6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/43782ec9c51c477a8cb7b11e6a39250e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/af8acf9e74a941aebc71081748d9414a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/2faf0738048046d4b38f7841088e19df, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e66f016dcf174196b717df2d1a2fa0af, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b378b116280541d98a400b811fbf5c0f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/449bf2020d294fe9a087bf1e8d07b25b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/cfed5a589e664bd796fb5f031cdfd632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/67ccf3b07fb24b25b9526bf590a1e31d, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5d2a62038cf046b887758630681c0842, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/99e41068e3a049d3b357a4f5ea0c3083, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f8850add885e4ed388ecd29cacf0a076, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/0caa2f09f972442dbea12f31d530d59c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3b3fe211d914436bb3657b4e5b9abf41, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3fd16b652d0d4e9da6bede37c15d6398, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/22d53536dd1d4f38802c010ded8adf15, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/149615e2a4e848e0b97e6efac21429d2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8d66d30d86d94f469af041280f319d3b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/721379b8896e4d9aa870860966dc9c8f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/ed7b37c940b94ba1abd0602676455c8b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c7e5249d0c724e20b873e52bf53b0527, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/bfbcb8331c244c2a9978127a5740bd52, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/63014b52b6504c27940c7351de9eb356, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/738c3d438c414e189e039d49a6ef915e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/659ff57a17944e44b9a80f348e9579e6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/be8dd653f7eb4772b9fdc8219c1f200d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/39ba5771ca05407b8fc0bef2cab7f9de, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8584c6885f2c4a9a883142608020f645, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3e3c917afadd44ed803ab1dc8e8a92fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5159ffeb18b04a458191e8b194c0db91, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b248db06ea77457288ffd288cd445069, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/afef44fc564043058eb987a3b755861c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f2956fdd904a4e3ab6d0295f48dd7288, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3a866253d32147cc86bfe304c6754c85, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e29c251816bf47b68a84ffc868304d73, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c5dbf061a918442c9899064c80763b8a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/246642f604cb44378303c9609f3c2d8a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d2a8322969ea4184bd4e53e54506ecfb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/adc465e27a0b466bb7a9876d1bff0a74, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8ca6d147d2b847fb950ade2187b2ed88, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e6f58ce9ea1248df8f653950802d3cd4] to archive 2024-12-06T10:17:22,203 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T10:17:22,205 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d609142fb37b4be78d6049eab52474d4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d609142fb37b4be78d6049eab52474d4 2024-12-06T10:17:22,206 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/edd3d87ad0274960b0e41ac2022ecc87 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/edd3d87ad0274960b0e41ac2022ecc87 2024-12-06T10:17:22,207 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/dfabba2d3f4741c09080e57da54f3a1e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/dfabba2d3f4741c09080e57da54f3a1e 2024-12-06T10:17:22,209 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/442b24b635cd4d75b3bd4180ba7497e6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/442b24b635cd4d75b3bd4180ba7497e6 2024-12-06T10:17:22,210 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3f61562fe0a145a7b909bbc0047ef1c6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3f61562fe0a145a7b909bbc0047ef1c6 2024-12-06T10:17:22,211 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/43782ec9c51c477a8cb7b11e6a39250e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/43782ec9c51c477a8cb7b11e6a39250e 2024-12-06T10:17:22,212 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/af8acf9e74a941aebc71081748d9414a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/af8acf9e74a941aebc71081748d9414a 2024-12-06T10:17:22,213 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/2faf0738048046d4b38f7841088e19df to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/2faf0738048046d4b38f7841088e19df 2024-12-06T10:17:22,214 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e66f016dcf174196b717df2d1a2fa0af to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e66f016dcf174196b717df2d1a2fa0af 2024-12-06T10:17:22,216 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b378b116280541d98a400b811fbf5c0f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b378b116280541d98a400b811fbf5c0f 2024-12-06T10:17:22,217 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/449bf2020d294fe9a087bf1e8d07b25b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/449bf2020d294fe9a087bf1e8d07b25b 2024-12-06T10:17:22,218 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/cfed5a589e664bd796fb5f031cdfd632 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/cfed5a589e664bd796fb5f031cdfd632 2024-12-06T10:17:22,220 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/67ccf3b07fb24b25b9526bf590a1e31d to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/67ccf3b07fb24b25b9526bf590a1e31d 2024-12-06T10:17:22,221 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5d2a62038cf046b887758630681c0842 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5d2a62038cf046b887758630681c0842 2024-12-06T10:17:22,222 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/99e41068e3a049d3b357a4f5ea0c3083 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/99e41068e3a049d3b357a4f5ea0c3083 2024-12-06T10:17:22,223 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f8850add885e4ed388ecd29cacf0a076 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f8850add885e4ed388ecd29cacf0a076 2024-12-06T10:17:22,224 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/0caa2f09f972442dbea12f31d530d59c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/0caa2f09f972442dbea12f31d530d59c 2024-12-06T10:17:22,226 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3b3fe211d914436bb3657b4e5b9abf41 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3b3fe211d914436bb3657b4e5b9abf41 2024-12-06T10:17:22,227 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3fd16b652d0d4e9da6bede37c15d6398 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3fd16b652d0d4e9da6bede37c15d6398 2024-12-06T10:17:22,229 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/22d53536dd1d4f38802c010ded8adf15 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/22d53536dd1d4f38802c010ded8adf15 2024-12-06T10:17:22,230 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/149615e2a4e848e0b97e6efac21429d2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/149615e2a4e848e0b97e6efac21429d2 2024-12-06T10:17:22,232 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8d66d30d86d94f469af041280f319d3b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8d66d30d86d94f469af041280f319d3b 2024-12-06T10:17:22,233 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/721379b8896e4d9aa870860966dc9c8f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/721379b8896e4d9aa870860966dc9c8f 2024-12-06T10:17:22,234 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/ed7b37c940b94ba1abd0602676455c8b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/ed7b37c940b94ba1abd0602676455c8b 2024-12-06T10:17:22,235 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c7e5249d0c724e20b873e52bf53b0527 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c7e5249d0c724e20b873e52bf53b0527 2024-12-06T10:17:22,236 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/bfbcb8331c244c2a9978127a5740bd52 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/bfbcb8331c244c2a9978127a5740bd52 2024-12-06T10:17:22,237 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/63014b52b6504c27940c7351de9eb356 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/63014b52b6504c27940c7351de9eb356 2024-12-06T10:17:22,239 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/738c3d438c414e189e039d49a6ef915e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/738c3d438c414e189e039d49a6ef915e 2024-12-06T10:17:22,240 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/659ff57a17944e44b9a80f348e9579e6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/659ff57a17944e44b9a80f348e9579e6 2024-12-06T10:17:22,241 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/be8dd653f7eb4772b9fdc8219c1f200d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/be8dd653f7eb4772b9fdc8219c1f200d 2024-12-06T10:17:22,242 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/39ba5771ca05407b8fc0bef2cab7f9de to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/39ba5771ca05407b8fc0bef2cab7f9de 2024-12-06T10:17:22,243 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8584c6885f2c4a9a883142608020f645 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8584c6885f2c4a9a883142608020f645 2024-12-06T10:17:22,244 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3e3c917afadd44ed803ab1dc8e8a92fa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3e3c917afadd44ed803ab1dc8e8a92fa 2024-12-06T10:17:22,245 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5159ffeb18b04a458191e8b194c0db91 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5159ffeb18b04a458191e8b194c0db91 2024-12-06T10:17:22,246 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b248db06ea77457288ffd288cd445069 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/b248db06ea77457288ffd288cd445069 2024-12-06T10:17:22,247 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/afef44fc564043058eb987a3b755861c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/afef44fc564043058eb987a3b755861c 2024-12-06T10:17:22,249 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f2956fdd904a4e3ab6d0295f48dd7288 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/f2956fdd904a4e3ab6d0295f48dd7288 2024-12-06T10:17:22,250 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3a866253d32147cc86bfe304c6754c85 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/3a866253d32147cc86bfe304c6754c85 2024-12-06T10:17:22,251 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e29c251816bf47b68a84ffc868304d73 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e29c251816bf47b68a84ffc868304d73 2024-12-06T10:17:22,253 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c5dbf061a918442c9899064c80763b8a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/c5dbf061a918442c9899064c80763b8a 2024-12-06T10:17:22,254 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/246642f604cb44378303c9609f3c2d8a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/246642f604cb44378303c9609f3c2d8a 2024-12-06T10:17:22,255 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d2a8322969ea4184bd4e53e54506ecfb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/d2a8322969ea4184bd4e53e54506ecfb 2024-12-06T10:17:22,256 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/adc465e27a0b466bb7a9876d1bff0a74 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/adc465e27a0b466bb7a9876d1bff0a74 2024-12-06T10:17:22,257 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8ca6d147d2b847fb950ade2187b2ed88 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/8ca6d147d2b847fb950ade2187b2ed88 2024-12-06T10:17:22,259 DEBUG [StoreCloser-TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e6f58ce9ea1248df8f653950802d3cd4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/e6f58ce9ea1248df8f653950802d3cd4 2024-12-06T10:17:22,265 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/recovered.edits/667.seqid, newMaxSeqId=667, maxSeqId=1 2024-12-06T10:17:22,268 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df. 2024-12-06T10:17:22,268 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for b58170106b3730174deb9625aeac23df: 2024-12-06T10:17:22,270 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed b58170106b3730174deb9625aeac23df 2024-12-06T10:17:22,271 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=b58170106b3730174deb9625aeac23df, regionState=CLOSED 2024-12-06T10:17:22,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-06T10:17:22,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure b58170106b3730174deb9625aeac23df, server=552d6a33fa09,33397,1733480204743 in 1.6310 sec 2024-12-06T10:17:22,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-06T10:17:22,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b58170106b3730174deb9625aeac23df, UNASSIGN in 1.6350 sec 2024-12-06T10:17:22,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-06T10:17:22,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6420 sec 2024-12-06T10:17:22,278 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480242278"}]},"ts":"1733480242278"} 2024-12-06T10:17:22,279 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T10:17:22,282 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T10:17:22,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6600 sec 2024-12-06T10:17:22,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-06T10:17:22,734 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 34 completed 2024-12-06T10:17:22,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T10:17:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,742 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,744 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-06T10:17:22,748 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df 2024-12-06T10:17:22,752 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/recovered.edits] 2024-12-06T10:17:22,755 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3b6bd4d2027342b4b14162488638ec83 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/3b6bd4d2027342b4b14162488638ec83 2024-12-06T10:17:22,756 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa9906c3e7e741649718eb93d19fd15a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/A/fa9906c3e7e741649718eb93d19fd15a 2024-12-06T10:17:22,759 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35057a9a73514bccba32a571f4bb866f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/35057a9a73514bccba32a571f4bb866f 2024-12-06T10:17:22,760 DEBUG 
[HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6cca589e8edb4e1896ab3d99c3af220e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/B/6cca589e8edb4e1896ab3d99c3af220e 2024-12-06T10:17:22,763 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/28010728ada1448d82cf9532af8585d9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/28010728ada1448d82cf9532af8585d9 2024-12-06T10:17:22,764 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5fdd41f139d54516a20ea9a2d37044fb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/C/5fdd41f139d54516a20ea9a2d37044fb 2024-12-06T10:17:22,768 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/recovered.edits/667.seqid to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df/recovered.edits/667.seqid 2024-12-06T10:17:22,768 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/b58170106b3730174deb9625aeac23df 2024-12-06T10:17:22,768 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T10:17:22,774 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-06T10:17:22,781 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T10:17:22,815 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T10:17:22,817 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,817 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
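The entries above record the test harness disabling TestAcidGuarantees (procId 34) and then deleting it (procId 38), with HFileArchiver moving every store file into the archive/ tree before the region is removed from hbase:meta. As a minimal client-side sketch of that same disable-then-delete sequence through the public Admin API (the class name DropTestTable and the configuration loading are illustrative assumptions, not taken from this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml is on the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // drives a DisableTableProcedure like pid=34 in the log
        }
        admin.deleteTable(table);      // drives a DeleteTableProcedure; store files are archived, not destroyed
      }
    }
  }
}
```

Both calls block until the corresponding master procedure completes, which matches the repeated "Checking to see if procedure is done" polling seen in the log.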
2024-12-06T10:17:22,817 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733480242817"}]},"ts":"9223372036854775807"} 2024-12-06T10:17:22,821 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T10:17:22,821 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b58170106b3730174deb9625aeac23df, NAME => 'TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T10:17:22,821 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-06T10:17:22,821 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733480242821"}]},"ts":"9223372036854775807"} 2024-12-06T10:17:22,824 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T10:17:22,827 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-12-06T10:17:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-06T10:17:22,845 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-12-06T10:17:22,859 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;552d6a33fa09:33397-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1360265992_22 at /127.0.0.1:37436 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=451 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=316 (was 189) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6601 (was 7120) 2024-12-06T10:17:22,869 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=316, ProcessCount=11, AvailableMemoryMB=6600 2024-12-06T10:17:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T10:17:22,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:17:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:22,873 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:17:22,873 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
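The create request logged just above specifies the ADAPTIVE compacting-memstore table attribute, three single-version families A, B and C with 64 KB blocks, and the 131072-byte MEMSTORE_FLUSHSIZE that triggered the TableDescriptorChecker warning. A hedged sketch of how a client could build an equivalent descriptor with the HBase 2.x TableDescriptorBuilder API follows; the class name and exact builder choices are illustrative assumptions, not the test's actual code:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTable {
  // Builds a descriptor resembling the one logged above: ADAPTIVE in-memory
  // compaction plus three single-version families A, B and C.
  static TableDescriptor descriptor() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata seen in the create request
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            // 131072 bytes: the MEMSTORE_FLUSHSIZE the checker warned about
            .setMemStoreFlushSize(128 * 1024);
    for (String family : new String[] {"A", "B", "C"}) {
      ColumnFamilyDescriptor cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => 64KB
              .build();
      builder.setColumnFamily(cf);
    }
    return builder.build();
  }

  static void create(Admin admin) throws java.io.IOException {
    admin.createTable(descriptor());    // drives a CreateTableProcedure like pid=39 in the log
  }
}
```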
2024-12-06T10:17:22,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-12-06T10:17:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-06T10:17:22,874 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:17:22,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741982_1158 (size=963) 2024-12-06T10:17:22,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-06T10:17:23,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-06T10:17:23,283 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:17:23,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741983_1159 (size=53) 2024-12-06T10:17:23,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-06T10:17:23,689 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:17:23,690 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing cba9fa4bb7ad0155608756e918c3bf01, disabling compactions & flushes 2024-12-06T10:17:23,690 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 
{}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:23,690 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:23,690 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. after waiting 0 ms 2024-12-06T10:17:23,690 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:23,690 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:23,690 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:23,691 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:17:23,691 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733480243691"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480243691"}]},"ts":"1733480243691"} 2024-12-06T10:17:23,693 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
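The CREATE_TABLE_ADD_TO_META step above registers a single region (empty start and end keys) for the new table in hbase:meta. If one wanted to verify that layout from a client after creation, a small sketch along these lines should work; the helper name VerifyTableLayout is invented for illustration:

```java
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

final class VerifyTableLayout {
  // Lists the regions recorded in hbase:meta for the new table and prints
  // their encoded names; the log shows one region spanning ''..''.
  static void dumpRegions(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    List<RegionInfo> regions = admin.getRegions(table);
    for (RegionInfo region : regions) {
      System.out.printf("region=%s start=%s end=%s%n",
          region.getEncodedName(),
          Bytes.toStringBinary(region.getStartKey()),
          Bytes.toStringBinary(region.getEndKey()));
    }
  }
}
```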
2024-12-06T10:17:23,694 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:17:23,694 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480243694"}]},"ts":"1733480243694"} 2024-12-06T10:17:23,695 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T10:17:23,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, ASSIGN}] 2024-12-06T10:17:23,704 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, ASSIGN 2024-12-06T10:17:23,704 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:17:23,855 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:23,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:17:23,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-06T10:17:24,009 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:24,012 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
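The ASSIGN and OpenRegionProcedure entries above hand the new region to 552d6a33fa09,33397, and the StoreOpener lines that follow show each family coming up with a CompactingMemStore whose compactor is ADAPTIVE. As a hedged aside, the same behaviour can also be requested per column family via the public MemoryCompactionPolicy enum rather than the table-level metadata key; the sketch below is assumed, not confirmed by this log, to yield the equivalent memstore setup:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class AdaptiveFamily {
  // Per-family request for ADAPTIVE in-memory compaction; the region server
  // then opens the store with a CompactingMemStore and an ADAPTIVE compactor,
  // as the StoreOpener entries that follow report.
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .setMaxVersions(1)
        .build();
  }
}
```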
2024-12-06T10:17:24,012 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:17:24,013 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,013 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:17:24,013 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,013 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,014 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,016 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:24,016 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba9fa4bb7ad0155608756e918c3bf01 columnFamilyName A 2024-12-06T10:17:24,016 DEBUG [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:24,016 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(327): Store=cba9fa4bb7ad0155608756e918c3bf01/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:24,016 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,017 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:24,018 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba9fa4bb7ad0155608756e918c3bf01 columnFamilyName B 2024-12-06T10:17:24,018 DEBUG [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:24,019 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(327): Store=cba9fa4bb7ad0155608756e918c3bf01/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:24,019 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,020 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:24,020 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba9fa4bb7ad0155608756e918c3bf01 columnFamilyName C 2024-12-06T10:17:24,020 DEBUG [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:24,021 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(327): Store=cba9fa4bb7ad0155608756e918c3bf01/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:24,021 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:24,022 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,022 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,023 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:17:24,024 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:24,027 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:17:24,027 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened cba9fa4bb7ad0155608756e918c3bf01; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70470473, jitterRate=0.050091877579689026}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:17:24,028 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:24,028 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., pid=41, masterSystemTime=1733480244008 2024-12-06T10:17:24,030 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:24,030 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
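The CompactionConfiguration(181) and CompactingMemStore(122) lines above print the effective per-store settings, and the numbers look like the stock defaults (minCompactSize 128 MB, ratio 1.2, 3–10 files per compaction, 2 MB in-memory flush threshold). A hedged sketch of the configuration keys behind those values follows; the key names are the standard hbase-site.xml properties, but they should be checked against the reference guide for this exact 2.7.0-SNAPSHOT build.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror what CompactionConfiguration(181) printed above; they are the defaults.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    // In-memory compaction policy used by the CompactingMemStore (ADAPTIVE in this test).
    conf.set("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
  }
}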
2024-12-06T10:17:24,030 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:24,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-06T10:17:24,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 in 174 msec 2024-12-06T10:17:24,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-06T10:17:24,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, ASSIGN in 331 msec 2024-12-06T10:17:24,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:17:24,035 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480244035"}]},"ts":"1733480244035"} 2024-12-06T10:17:24,036 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T10:17:24,039 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:17:24,041 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1680 sec 2024-12-06T10:17:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-06T10:17:24,980 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-06T10:17:24,982 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ff872d8 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4506927 2024-12-06T10:17:24,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a9b9802, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:24,988 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:24,990 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:24,991 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:17:24,993 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33922, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:17:24,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T10:17:24,999 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:17:25,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:25,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741984_1160 (size=999) 2024-12-06T10:17:25,418 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-06T10:17:25,419 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-06T10:17:25,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:17:25,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, REOPEN/MOVE}] 2024-12-06T10:17:25,433 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, REOPEN/MOVE 2024-12-06T10:17:25,433 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:25,435 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:17:25,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:17:25,586 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:25,587 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,587 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:17:25,587 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing cba9fa4bb7ad0155608756e918c3bf01, disabling compactions & flushes 2024-12-06T10:17:25,587 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:25,587 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:25,587 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. after waiting 0 ms 2024-12-06T10:17:25,587 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
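The modify request logged at 10:17:24,999 changes only family A, turning it into a MOB family with MOB_THRESHOLD => '4'; that change is what drives the ModifyTableProcedure (pid=42) and the ReopenTableRegionsProcedure (pid=43) above. A minimal equivalent using the standard Admin API — an assumption, since the test's own call site is not shown here — looks like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Start from the current definition of family A so its other attributes are preserved.
      ColumnFamilyDescriptor current = admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(current)
          .setMobEnabled(true)     // IS_MOB => 'true'
          .setMobThreshold(4L)     // MOB_THRESHOLD => '4'
          .build();
      // Triggers a ModifyTableProcedure and a reopen of the table's regions, as in the log.
      admin.modifyColumnFamily(table, mobA);
    }
  }
}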
2024-12-06T10:17:25,591 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-06T10:17:25,592 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:25,592 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:25,592 WARN [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: cba9fa4bb7ad0155608756e918c3bf01 to self. 2024-12-06T10:17:25,594 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,594 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=CLOSED 2024-12-06T10:17:25,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-12-06T10:17:25,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 in 160 msec 2024-12-06T10:17:25,597 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, REOPEN/MOVE; state=CLOSED, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=true 2024-12-06T10:17:25,747 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:25,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:17:25,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:25,904 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:25,904 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:17:25,904 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,904 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:17:25,905 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,905 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,907 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,908 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:25,913 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba9fa4bb7ad0155608756e918c3bf01 columnFamilyName A 2024-12-06T10:17:25,917 DEBUG [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:25,917 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(327): Store=cba9fa4bb7ad0155608756e918c3bf01/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:25,918 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,918 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:25,919 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba9fa4bb7ad0155608756e918c3bf01 columnFamilyName B 2024-12-06T10:17:25,919 DEBUG [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:25,919 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(327): Store=cba9fa4bb7ad0155608756e918c3bf01/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:25,919 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,920 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:25,920 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba9fa4bb7ad0155608756e918c3bf01 columnFamilyName C 2024-12-06T10:17:25,920 DEBUG [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:25,921 INFO [StoreOpener-cba9fa4bb7ad0155608756e918c3bf01-1 {}] regionserver.HStore(327): Store=cba9fa4bb7ad0155608756e918c3bf01/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:25,921 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:25,922 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,923 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,924 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:17:25,925 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:25,926 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened cba9fa4bb7ad0155608756e918c3bf01; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70001288, jitterRate=0.04310047626495361}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:17:25,928 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:25,928 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., pid=46, masterSystemTime=1733480245900 2024-12-06T10:17:25,930 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:25,930 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
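After the reopen, the log shows a table flush request (FlushTableProcedure, pid=47) followed by a burst of RegionTooBusyException warnings: with MEMSTORE_FLUSHSIZE forced down to 131072 bytes (the TableDescriptorChecker warning at 10:17:24,999), writers quickly hit the 512.0 K memstore blocking limit quoted in those warnings (typically flush size times hbase.hregion.memstore.block.multiplier, though that derivation is an inference, not something this log states). A hedged sketch of the client-visible pieces — issuing the flush and retrying a put that bounces off the busy region — follows; the retry loop is illustrative, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetrySketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(name)) {
      // Client-side counterpart of the FlushTableProcedure kicked off in the log.
      admin.flush(name);

      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Writers in the log are rejected with RegionTooBusyException ("Over memstore
      // limit=512.0 K") while the flush is in progress; a bounded retry with backoff
      // is one way a caller can ride that out.
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException busy) { // RegionTooBusyException, possibly wrapped by the client
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}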
2024-12-06T10:17:25,930 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=OPEN, openSeqNum=5, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:25,933 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-12-06T10:17:25,933 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 in 183 msec 2024-12-06T10:17:25,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-06T10:17:25,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, REOPEN/MOVE in 501 msec 2024-12-06T10:17:25,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-06T10:17:25,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 515 msec 2024-12-06T10:17:25,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 937 msec 2024-12-06T10:17:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-12-06T10:17:25,949 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46c2c778 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7362d978 2024-12-06T10:17:25,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2931c73e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,959 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x433e2b26 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7bad2e85 2024-12-06T10:17:25,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@491ea2ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,964 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e3a4420 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ebda6ad 2024-12-06T10:17:25,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b44b1e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,969 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24f64590 to 
127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@19a533a3 2024-12-06T10:17:25,972 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,973 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c5c4716 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@465dc764 2024-12-06T10:17:25,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4c53ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00cb464a to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68f0be85 2024-12-06T10:17:25,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@247c0c93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,982 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4 2024-12-06T10:17:25,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,986 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-12-06T10:17:25,989 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,991 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-06T10:17:25,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:25,997 DEBUG 
[hconnection-0x3f96faad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:25,998 DEBUG [hconnection-0x2663b309-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:25,998 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:25,999 DEBUG [hconnection-0x62b0284d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-06T10:17:26,000 DEBUG [hconnection-0x3c3a9948-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:26,000 DEBUG [hconnection-0x2cf07b26-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:26,000 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T10:17:26,000 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:26,001 DEBUG [hconnection-0x420eadaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:26,001 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,001 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,001 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43878, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,001 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,002 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,003 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:26,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:26,003 DEBUG [hconnection-0x6ebb124e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:26,004 DEBUG [hconnection-0x115cd1b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:26,005 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,005 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43914, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,006 DEBUG [hconnection-0x13563c7b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:26,008 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:26,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:26,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:26,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120680a510d9689b4f8093bdb169f48e919a_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480246023/Put/seqid=0 2024-12-06T10:17:26,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480306072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480306073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480306071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480306077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480306077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T10:17:26,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741985_1161 (size=12154) 2024-12-06T10:17:26,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:26,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:26,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480306180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480306181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480306181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480306187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480306188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T10:17:26,311 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:26,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:26,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
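Note on the repeated pid=48 failures above: the master's flush procedure keeps dispatching FlushRegionCallable to the region server, the region answers "NOT flushing ... as already flushing", the callable surfaces java.io.IOException "Unable to complete flush", and the master logs "Remote procedure failed" and re-dispatches until the in-progress flush completes. For orientation only, a flush of this kind can also be requested from client code through the Admin API; the sketch below is a minimal, hypothetical example (the table name is taken from the log, everything else is assumed) and is not part of the test itself. Depending on the HBase version, Admin.flush may be routed through a master-side procedure like the pid=48 seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask for a flush of the table. If a region is already flushing, the
      // region server rejects the remote call and the master retries it,
      // which is exactly the retry loop visible in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The retries are harmless here; they stop as soon as the MemStoreFlusher finishes the flush that is already running.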
2024-12-06T10:17:26,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480306384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480306385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480306385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480306392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480306398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,470 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:26,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:26,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
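Note on the "Over memstore limit=512.0 K" warnings: the region's memstore has grown past its blocking threshold, so HRegion.checkResources rejects further Mutate calls with RegionTooBusyException until the flush brings the memstore back down. In stock HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows how a test-style configuration could arrive at a 512 K limit; the concrete values are illustrative assumptions, not read from this test's actual site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Illustrative values only: a 128 KB flush threshold with a 4x block
    // multiplier yields the 512 K blocking limit reported in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size: " + (flushSize * multiplier) + " bytes");
  }
}

A deliberately tiny limit like this is what forces the region into the blocked state quickly, which is the point of an ACID-guarantees stress test.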
2024-12-06T10:17:26,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,505 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:26,510 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120680a510d9689b4f8093bdb169f48e919a_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120680a510d9689b4f8093bdb169f48e919a_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:26,513 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/2b5b6b570a4f4312a0ccd0dbe302639d, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:26,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/2b5b6b570a4f4312a0ccd0dbe302639d is 175, key is test_row_0/A:col10/1733480246023/Put/seqid=0 2024-12-06T10:17:26,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741986_1162 (size=30955) 2024-12-06T10:17:26,564 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/2b5b6b570a4f4312a0ccd0dbe302639d 2024-12-06T10:17:26,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/cce5f249efc844d19596f4d099c1497c is 50, key is test_row_0/B:col10/1733480246023/Put/seqid=0 2024-12-06T10:17:26,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T10:17:26,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:26,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:26,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:26,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:26,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:26,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741987_1163 (size=12001) 2024-12-06T10:17:26,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/cce5f249efc844d19596f4d099c1497c 2024-12-06T10:17:26,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/e1abb72ba51d40f2a3505efdad69b47c is 50, key is test_row_0/C:col10/1733480246023/Put/seqid=0 2024-12-06T10:17:26,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480306689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480306689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480306690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480306696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:26,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480306700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741988_1164 (size=12001) 2024-12-06T10:17:26,778 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:26,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:26,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
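Note on the ipc.CallRunner lines above: each rejected Mutate call is answered with the RegionTooBusyException, and the HBase client library normally retries it internally with backoff until its retry budget (hbase.client.retries.number, hbase.client.pause) is exhausted. A hand-rolled equivalent would look roughly like the sketch below; the row, family, and qualifier are taken from the log ("test_row_0", "A", "col10"), the value and retry parameters are assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);          // may be rejected while the region is over its memstore limit
          break;                   // write accepted
        } catch (IOException e) {  // RegionTooBusyException is an IOException subtype
          Thread.sleep(backoffMs); // back off and retry once the flush has made room
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}

Exponential backoff is the usual choice here because the condition clears itself as soon as the flush completes; hammering the region server with immediate retries only adds to the RPC handler load visible in this log.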
2024-12-06T10:17:26,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:26,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:26,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:26,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:26,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:26,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:27,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T10:17:27,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:27,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:27,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:27,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:27,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:27,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:27,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T10:17:27,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/e1abb72ba51d40f2a3505efdad69b47c 2024-12-06T10:17:27,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/2b5b6b570a4f4312a0ccd0dbe302639d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d 2024-12-06T10:17:27,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d, entries=150, sequenceid=16, filesize=30.2 K 2024-12-06T10:17:27,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/cce5f249efc844d19596f4d099c1497c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/cce5f249efc844d19596f4d099c1497c 2024-12-06T10:17:27,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/cce5f249efc844d19596f4d099c1497c, entries=150, sequenceid=16, filesize=11.7 K 2024-12-06T10:17:27,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/e1abb72ba51d40f2a3505efdad69b47c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/e1abb72ba51d40f2a3505efdad69b47c 2024-12-06T10:17:27,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/e1abb72ba51d40f2a3505efdad69b47c, entries=150, sequenceid=16, filesize=11.7 K 2024-12-06T10:17:27,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for cba9fa4bb7ad0155608756e918c3bf01 in 1133ms, sequenceid=16, compaction requested=false 2024-12-06T10:17:27,156 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-06T10:17:27,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:27,195 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:17:27,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:27,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:27,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:27,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:27,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:27,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:27,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:27,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068f178dc2f64043fb9ca91273c82cbfcf_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480246073/Put/seqid=0 2024-12-06T10:17:27,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480307211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480307213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480307215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480307219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:17:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480307219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:17:27,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741989_1165 (size=14594)
2024-12-06T10:17:27,231 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:27,236 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068f178dc2f64043fb9ca91273c82cbfcf_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068f178dc2f64043fb9ca91273c82cbfcf_cba9fa4bb7ad0155608756e918c3bf01
2024-12-06T10:17:27,238 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d391df585f7140bd8830cc9e35b1ceb6, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01]
2024-12-06T10:17:27,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d391df585f7140bd8830cc9e35b1ceb6 is 175, key is test_row_0/A:col10/1733480246073/Put/seqid=0
2024-12-06T10:17:27,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:27,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48
2024-12-06T10:17:27,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:27,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing
2024-12-06T10:17:27,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:27,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48
java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:17:27,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48
java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:17:27,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741990_1166 (size=39549) 2024-12-06T10:17:27,255 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d391df585f7140bd8830cc9e35b1ceb6 2024-12-06T10:17:27,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/d1eee93923bf49c2a22b8e73597c594e is 50, key is test_row_0/B:col10/1733480246073/Put/seqid=0 2024-12-06T10:17:27,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741991_1167 (size=12001) 2024-12-06T10:17:27,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/d1eee93923bf49c2a22b8e73597c594e 2024-12-06T10:17:27,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480307321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480307321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480307321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480307327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:17:27,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480307327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:17:27,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/03dd1168286845ddb6abbe10904e3e5a is 50, key is test_row_0/C:col10/1733480246073/Put/seqid=0
2024-12-06T10:17:27,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741992_1168 (size=12001)
2024-12-06T10:17:27,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/03dd1168286845ddb6abbe10904e3e5a
2024-12-06T10:17:27,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d391df585f7140bd8830cc9e35b1ceb6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6
2024-12-06T10:17:27,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6, entries=200, sequenceid=42, filesize=38.6 K
2024-12-06T10:17:27,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/d1eee93923bf49c2a22b8e73597c594e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d1eee93923bf49c2a22b8e73597c594e
2024-12-06T10:17:27,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d1eee93923bf49c2a22b8e73597c594e, entries=150, sequenceid=42, filesize=11.7 K
2024-12-06T10:17:27,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/03dd1168286845ddb6abbe10904e3e5a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/03dd1168286845ddb6abbe10904e3e5a
2024-12-06T10:17:27,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/03dd1168286845ddb6abbe10904e3e5a, entries=150, sequenceid=42, filesize=11.7 K
2024-12-06T10:17:27,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for cba9fa4bb7ad0155608756e918c3bf01 in 195ms, sequenceid=42, compaction requested=false
2024-12-06T10:17:27,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01:
2024-12-06T10:17:27,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:27,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48
2024-12-06T10:17:27,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:27,395 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:27,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:27,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a8397f0145db4a779de4396e40d66477_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480247200/Put/seqid=0 2024-12-06T10:17:27,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741993_1169 (size=12154) 2024-12-06T10:17:27,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:27,441 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a8397f0145db4a779de4396e40d66477_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a8397f0145db4a779de4396e40d66477_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:27,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/997320fd15344913abb9d00b7c0ce718, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:27,443 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/997320fd15344913abb9d00b7c0ce718 is 175, key is test_row_0/A:col10/1733480247200/Put/seqid=0
2024-12-06T10:17:27,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741994_1170 (size=30955)
2024-12-06T10:17:27,456 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/997320fd15344913abb9d00b7c0ce718
2024-12-06T10:17:27,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/76cc0e0d7f504dfcbd00ff624de9d565 is 50, key is test_row_0/B:col10/1733480247200/Put/seqid=0
2024-12-06T10:17:27,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741995_1171 (size=12001)
2024-12-06T10:17:27,508 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/76cc0e0d7f504dfcbd00ff624de9d565
2024-12-06T10:17:27,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/aff476f723de4555845a28597189711e is 50, key is test_row_0/C:col10/1733480247200/Put/seqid=0
2024-12-06T10:17:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01
2024-12-06T10:17:27,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing
2024-12-06T10:17:27,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741996_1172 (size=12001)
2024-12-06T10:17:27,539 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/aff476f723de4555845a28597189711e
2024-12-06T10:17:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/997320fd15344913abb9d00b7c0ce718 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718
2024-12-06T10:17:27,553 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718, entries=150, sequenceid=53, filesize=30.2 K
2024-12-06T10:17:27,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/76cc0e0d7f504dfcbd00ff624de9d565 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/76cc0e0d7f504dfcbd00ff624de9d565
2024-12-06T10:17:27,561 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/76cc0e0d7f504dfcbd00ff624de9d565, entries=150, sequenceid=53, filesize=11.7 K
2024-12-06T10:17:27,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/aff476f723de4555845a28597189711e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/aff476f723de4555845a28597189711e
2024-12-06T10:17:27,570 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/aff476f723de4555845a28597189711e, entries=150, sequenceid=53, filesize=11.7 K
2024-12-06T10:17:27,571 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=87.22 KB/89310 for cba9fa4bb7ad0155608756e918c3bf01 in 175ms, sequenceid=53, compaction requested=true
2024-12-06T10:17:27,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01:
2024-12-06T10:17:27,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:27,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48
2024-12-06T10:17:27,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=48
2024-12-06T10:17:27,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01
2024-12-06T10:17:27,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-12-06T10:17:27,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A
2024-12-06T10:17:27,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:27,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B
2024-12-06T10:17:27,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:27,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C
2024-12-06T10:17:27,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:17:27,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47
2024-12-06T10:17:27,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5690 sec
2024-12-06T10:17:27,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.5770 sec
2024-12-06T10:17:27,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060f002c125a74459ab1654b64a76ecadb_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480247571/Put/seqid=0
2024-12-06T10:17:27,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480307599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480307605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480307606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480307607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480307610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741997_1173 (size=21918) 2024-12-06T10:17:27,623 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:27,633 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060f002c125a74459ab1654b64a76ecadb_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060f002c125a74459ab1654b64a76ecadb_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:27,634 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/fd2a41dfc2d1455b98e91cad56cf20c0, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:27,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/fd2a41dfc2d1455b98e91cad56cf20c0 is 175, key is test_row_0/A:col10/1733480247571/Put/seqid=0 2024-12-06T10:17:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741998_1174 (size=65323) 2024-12-06T10:17:27,652 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=70, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/fd2a41dfc2d1455b98e91cad56cf20c0 2024-12-06T10:17:27,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/75dee3f4c0164e59aac118b2181f301f is 50, key is 
test_row_0/B:col10/1733480247571/Put/seqid=0 2024-12-06T10:17:27,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741999_1175 (size=12001) 2024-12-06T10:17:27,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/75dee3f4c0164e59aac118b2181f301f 2024-12-06T10:17:27,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/c1c52fe299344af2b18c548144278bfc is 50, key is test_row_0/C:col10/1733480247571/Put/seqid=0 2024-12-06T10:17:27,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742000_1176 (size=12001) 2024-12-06T10:17:27,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480307712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480307712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480307713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480307714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480307716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,819 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T10:17:27,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480307915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480307919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480307919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480307923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:27,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480307923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/c1c52fe299344af2b18c548144278bfc 2024-12-06T10:17:28,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T10:17:28,106 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-06T10:17:28,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:28,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-06T10:17:28,112 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:28,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:28,113 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:28,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:28,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/fd2a41dfc2d1455b98e91cad56cf20c0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0 2024-12-06T10:17:28,121 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0, entries=350, sequenceid=70, filesize=63.8 K 2024-12-06T10:17:28,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/75dee3f4c0164e59aac118b2181f301f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/75dee3f4c0164e59aac118b2181f301f 2024-12-06T10:17:28,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/75dee3f4c0164e59aac118b2181f301f, entries=150, sequenceid=70, filesize=11.7 K 2024-12-06T10:17:28,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/c1c52fe299344af2b18c548144278bfc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/c1c52fe299344af2b18c548144278bfc 2024-12-06T10:17:28,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/c1c52fe299344af2b18c548144278bfc, entries=150, sequenceid=70, filesize=11.7 K 2024-12-06T10:17:28,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for cba9fa4bb7ad0155608756e918c3bf01 in 564ms, sequenceid=70, compaction requested=true 2024-12-06T10:17:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:28,136 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:28,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:28,138 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 166782 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:28,138 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:28,138 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
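The RegionTooBusyException entries above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (512.0 K here), which is the backpressure this test is exercising. Purely as a minimal sketch, assuming the standard HBase 2.x client API and the table layout visible in the log (table TestAcidGuarantees, family A, qualifier col10) and nothing about the test's own writer code, a client that backs off explicitly on this exception might look like the following; note the stock client normally retries these calls internally on its own, so the explicit catch is illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    // Rejected with RegionTooBusyException while the memstore
                    // is over its blocking limit, as in the log above.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // Wait for the in-flight flush to drain the memstore, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}
```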
2024-12-06T10:17:28,138 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=162.9 K 2024-12-06T10:17:28,138 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,138 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0] 2024-12-06T10:17:28,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:28,139 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b5b6b570a4f4312a0ccd0dbe302639d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733480246019 2024-12-06T10:17:28,139 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting d391df585f7140bd8830cc9e35b1ceb6, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733480246063 2024-12-06T10:17:28,140 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:28,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store 
size is 2 2024-12-06T10:17:28,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:28,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:28,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:28,140 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 997320fd15344913abb9d00b7c0ce718, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733480247200 2024-12-06T10:17:28,141 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd2a41dfc2d1455b98e91cad56cf20c0, keycount=350, bloomtype=ROW, size=63.8 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733480247544 2024-12-06T10:17:28,141 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:28,142 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:28,142 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:28,142 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/cce5f249efc844d19596f4d099c1497c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d1eee93923bf49c2a22b8e73597c594e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/76cc0e0d7f504dfcbd00ff624de9d565, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/75dee3f4c0164e59aac118b2181f301f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=46.9 K 2024-12-06T10:17:28,142 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cce5f249efc844d19596f4d099c1497c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733480246019 2024-12-06T10:17:28,143 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d1eee93923bf49c2a22b8e73597c594e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733480246073 2024-12-06T10:17:28,144 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 76cc0e0d7f504dfcbd00ff624de9d565, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733480247200 2024-12-06T10:17:28,146 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 75dee3f4c0164e59aac118b2181f301f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733480247566 2024-12-06T10:17:28,163 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#155 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:28,164 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/54fe9dde453d4b2ca2b7257384f16459 is 50, key is test_row_0/B:col10/1733480247571/Put/seqid=0 2024-12-06T10:17:28,165 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:28,172 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412066b90d808bd614660b38c0419ba62307a_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742001_1177 (size=12139) 2024-12-06T10:17:28,179 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412066b90d808bd614660b38c0419ba62307a_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:28,179 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412066b90d808bd614660b38c0419ba62307a_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:28,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742002_1178 (size=4469) 2024-12-06T10:17:28,210 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#156 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:28,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:28,213 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/064931b8b64440a5a32b69f20fb43a97 is 175, key is test_row_0/A:col10/1733480247571/Put/seqid=0 2024-12-06T10:17:28,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:28,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T10:17:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:28,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742003_1179 (size=31093) 2024-12-06T10:17:28,239 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/064931b8b64440a5a32b69f20fb43a97 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/064931b8b64440a5a32b69f20fb43a97 2024-12-06T10:17:28,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412064d0e44050701487eb9bb00a7015ca838_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480248219/Put/seqid=0 2024-12-06T10:17:28,247 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into 064931b8b64440a5a32b69f20fb43a97(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:28,247 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:28,247 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=12, startTime=1733480248136; duration=0sec 2024-12-06T10:17:28,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480308239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480308240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,248 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:28,248 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:28,249 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:28,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480308241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480308245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,250 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:28,251 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:28,251 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:28,251 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/e1abb72ba51d40f2a3505efdad69b47c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/03dd1168286845ddb6abbe10904e3e5a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/aff476f723de4555845a28597189711e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/c1c52fe299344af2b18c548144278bfc] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=46.9 K 2024-12-06T10:17:28,251 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1abb72ba51d40f2a3505efdad69b47c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733480246019 2024-12-06T10:17:28,252 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03dd1168286845ddb6abbe10904e3e5a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733480246073 2024-12-06T10:17:28,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480308247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,252 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting aff476f723de4555845a28597189711e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733480247200 2024-12-06T10:17:28,253 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1c52fe299344af2b18c548144278bfc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733480247566 2024-12-06T10:17:28,265 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742004_1180 (size=12154) 2024-12-06T10:17:28,291 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#C#compaction#158 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:28,291 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/b482c3d0b7554e35996b8da20c4dce86 is 50, key is test_row_0/C:col10/1733480247571/Put/seqid=0 2024-12-06T10:17:28,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742005_1181 (size=12139) 2024-12-06T10:17:28,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480308349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480308350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480308351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480308351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480308353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:28,418 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:28,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:28,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480308552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480308555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480308557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480308558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480308558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,572 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:28,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:28,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,587 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/54fe9dde453d4b2ca2b7257384f16459 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/54fe9dde453d4b2ca2b7257384f16459 2024-12-06T10:17:28,594 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into 54fe9dde453d4b2ca2b7257384f16459(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:28,595 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:28,595 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=12, startTime=1733480248139; duration=0sec 2024-12-06T10:17:28,595 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:28,595 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:28,688 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:28,693 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412064d0e44050701487eb9bb00a7015ca838_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412064d0e44050701487eb9bb00a7015ca838_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:28,694 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/a2b5f018e978499db43971231708c209, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:28,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/a2b5f018e978499db43971231708c209 is 175, key is test_row_0/A:col10/1733480248219/Put/seqid=0 2024-12-06T10:17:28,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742006_1182 (size=30955) 2024-12-06T10:17:28,699 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=90, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/a2b5f018e978499db43971231708c209 2024-12-06T10:17:28,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:28,722 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/b482c3d0b7554e35996b8da20c4dce86 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b482c3d0b7554e35996b8da20c4dce86 2024-12-06T10:17:28,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/73f15746812c411cb342f806e3554220 is 50, key is test_row_0/B:col10/1733480248219/Put/seqid=0 2024-12-06T10:17:28,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,732 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into b482c3d0b7554e35996b8da20c4dce86(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:28,732 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:28,732 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=12, startTime=1733480248140; duration=0sec 2024-12-06T10:17:28,732 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:28,732 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:28,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742007_1183 (size=12001) 2024-12-06T10:17:28,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480308856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480308858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480308862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480308864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:28,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480308864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,878 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:28,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:28,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:28,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:28,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:28,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:29,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:29,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:29,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:29,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:29,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:29,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:29,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/73f15746812c411cb342f806e3554220 2024-12-06T10:17:29,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/042d1fb260e741b4a1999ed5e135fb2c is 50, key is test_row_0/C:col10/1733480248219/Put/seqid=0 2024-12-06T10:17:29,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742008_1184 (size=12001) 2024-12-06T10:17:29,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/042d1fb260e741b4a1999ed5e135fb2c 2024-12-06T10:17:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/a2b5f018e978499db43971231708c209 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209 2024-12-06T10:17:29,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209, entries=150, sequenceid=90, filesize=30.2 K 2024-12-06T10:17:29,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/73f15746812c411cb342f806e3554220 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/73f15746812c411cb342f806e3554220 2024-12-06T10:17:29,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:29,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:29,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:29,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:29,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/73f15746812c411cb342f806e3554220, entries=150, sequenceid=90, filesize=11.7 K 2024-12-06T10:17:29,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/042d1fb260e741b4a1999ed5e135fb2c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/042d1fb260e741b4a1999ed5e135fb2c 2024-12-06T10:17:29,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/042d1fb260e741b4a1999ed5e135fb2c, entries=150, sequenceid=90, filesize=11.7 K 2024-12-06T10:17:29,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for cba9fa4bb7ad0155608756e918c3bf01 in 976ms, sequenceid=90, compaction requested=false 2024-12-06T10:17:29,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:29,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:29,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T10:17:29,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:29,340 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T10:17:29,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:29,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:29,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:29,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:29,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:29,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:29,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:29,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:29,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f7776cb82c8149999814a60beab96a9b_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480248243/Put/seqid=0 2024-12-06T10:17:29,380 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T10:17:29,380 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T10:17:29,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742009_1185 (size=12154) 2024-12-06T10:17:29,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480309434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480309434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480309434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480309435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480309436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480309541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480309541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480309542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480309543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480309544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480309745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480309747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480309748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480309749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:29,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480309750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:29,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:29,803 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f7776cb82c8149999814a60beab96a9b_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206f7776cb82c8149999814a60beab96a9b_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:29,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/672acde59450412cb03bf21f25565d91, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:29,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/672acde59450412cb03bf21f25565d91 is 175, key is test_row_0/A:col10/1733480248243/Put/seqid=0 2024-12-06T10:17:29,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742010_1186 (size=30955) 2024-12-06T10:17:30,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480310049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480310050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480310050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480310052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480310053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,212 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=109, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/672acde59450412cb03bf21f25565d91 2024-12-06T10:17:30,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:30,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/61f56e64c7cd4182adb4b6a0c122e7b3 is 50, key is test_row_0/B:col10/1733480248243/Put/seqid=0 2024-12-06T10:17:30,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742011_1187 (size=12001) 2024-12-06T10:17:30,235 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=109 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/61f56e64c7cd4182adb4b6a0c122e7b3 2024-12-06T10:17:30,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/08b016b343d2406d9172dfff65942a9f is 50, key is test_row_0/C:col10/1733480248243/Put/seqid=0 2024-12-06T10:17:30,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742012_1188 (size=12001) 2024-12-06T10:17:30,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480310550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480310552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480310553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480310554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480310556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:30,667 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=109 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/08b016b343d2406d9172dfff65942a9f 2024-12-06T10:17:30,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/672acde59450412cb03bf21f25565d91 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91 2024-12-06T10:17:30,689 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91, entries=150, sequenceid=109, filesize=30.2 K 2024-12-06T10:17:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/61f56e64c7cd4182adb4b6a0c122e7b3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/61f56e64c7cd4182adb4b6a0c122e7b3 2024-12-06T10:17:30,696 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/61f56e64c7cd4182adb4b6a0c122e7b3, entries=150, sequenceid=109, filesize=11.7 K 2024-12-06T10:17:30,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/08b016b343d2406d9172dfff65942a9f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/08b016b343d2406d9172dfff65942a9f 2024-12-06T10:17:30,705 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/08b016b343d2406d9172dfff65942a9f, entries=150, sequenceid=109, filesize=11.7 K 2024-12-06T10:17:30,707 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for cba9fa4bb7ad0155608756e918c3bf01 in 1366ms, sequenceid=109, compaction requested=true 2024-12-06T10:17:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-06T10:17:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-06T10:17:30,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-06T10:17:30,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5950 sec 2024-12-06T10:17:30,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.6030 sec 2024-12-06T10:17:31,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:31,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:17:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:31,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ae19fdeb217d4b73a8b3cf5b27c12b7d_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480311571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480311573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480311573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480311575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480311577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742013_1189 (size=14694) 2024-12-06T10:17:31,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480311676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480311676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480311677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480311680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480311681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480311880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480311881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480311881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480311882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:31,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480311883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:31,998 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:32,004 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ae19fdeb217d4b73a8b3cf5b27c12b7d_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ae19fdeb217d4b73a8b3cf5b27c12b7d_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:32,005 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/939ada9d144b44088aef2c15bc5ee6a9, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:32,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/939ada9d144b44088aef2c15bc5ee6a9 is 175, key is test_row_0/A:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:32,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742014_1190 (size=39649) 2024-12-06T10:17:32,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480312186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480312187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480312187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480312187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480312188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T10:17:32,218 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-06T10:17:32,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:32,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-06T10:17:32,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T10:17:32,222 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:32,222 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:32,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:32,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=51 2024-12-06T10:17:32,374 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T10:17:32,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:32,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:32,424 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/939ada9d144b44088aef2c15bc5ee6a9 2024-12-06T10:17:32,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/4642c961777f48ed80f7e220e02936d9 is 50, key is test_row_0/B:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:32,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742015_1191 (size=12101) 2024-12-06T10:17:32,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/4642c961777f48ed80f7e220e02936d9 2024-12-06T10:17:32,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/64030262cb0e4f13892061894630224f is 50, key is test_row_0/C:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:32,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742016_1192 (size=12101) 2024-12-06T10:17:32,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T10:17:32,527 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T10:17:32,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,680 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T10:17:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480312688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480312692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480312692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480312692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:32,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480312693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T10:17:32,833 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T10:17:32,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:32,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:32,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/64030262cb0e4f13892061894630224f 2024-12-06T10:17:32,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/939ada9d144b44088aef2c15bc5ee6a9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9 2024-12-06T10:17:32,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9, entries=200, sequenceid=131, filesize=38.7 K 2024-12-06T10:17:32,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/4642c961777f48ed80f7e220e02936d9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/4642c961777f48ed80f7e220e02936d9 2024-12-06T10:17:32,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/4642c961777f48ed80f7e220e02936d9, entries=150, 
sequenceid=131, filesize=11.8 K 2024-12-06T10:17:32,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/64030262cb0e4f13892061894630224f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/64030262cb0e4f13892061894630224f 2024-12-06T10:17:32,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/64030262cb0e4f13892061894630224f, entries=150, sequenceid=131, filesize=11.8 K 2024-12-06T10:17:32,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for cba9fa4bb7ad0155608756e918c3bf01 in 1372ms, sequenceid=131, compaction requested=true 2024-12-06T10:17:32,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:32,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:32,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:32,931 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:32,931 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:32,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:32,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:32,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:32,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:32,933 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132652 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:32,933 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:32,934 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in 
TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,934 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/064931b8b64440a5a32b69f20fb43a97, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=129.5 K 2024-12-06T10:17:32,934 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,934 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/064931b8b64440a5a32b69f20fb43a97, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9] 2024-12-06T10:17:32,934 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48242 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:32,934 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:32,934 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:32,934 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/54fe9dde453d4b2ca2b7257384f16459, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/73f15746812c411cb342f806e3554220, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/61f56e64c7cd4182adb4b6a0c122e7b3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/4642c961777f48ed80f7e220e02936d9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=47.1 K 2024-12-06T10:17:32,935 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 064931b8b64440a5a32b69f20fb43a97, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733480247566 2024-12-06T10:17:32,935 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 54fe9dde453d4b2ca2b7257384f16459, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733480247566 2024-12-06T10:17:32,936 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 73f15746812c411cb342f806e3554220, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480247603 2024-12-06T10:17:32,936 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2b5f018e978499db43971231708c209, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480247603 2024-12-06T10:17:32,936 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 61f56e64c7cd4182adb4b6a0c122e7b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=109, earliestPutTs=1733480248239 2024-12-06T10:17:32,936 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 672acde59450412cb03bf21f25565d91, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=109, earliestPutTs=1733480248239 2024-12-06T10:17:32,937 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4642c961777f48ed80f7e220e02936d9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480249387 2024-12-06T10:17:32,937 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 939ada9d144b44088aef2c15bc5ee6a9, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480249387 2024-12-06T10:17:32,952 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#167 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:32,952 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6416d8f693154d1ca2261afad97bc58d is 50, key is test_row_0/B:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:32,956 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:32,964 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120608a9c38a7aca4ab9a2077ba089c26647_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:32,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742017_1193 (size=12375) 2024-12-06T10:17:32,969 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120608a9c38a7aca4ab9a2077ba089c26647_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:32,969 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120608a9c38a7aca4ab9a2077ba089c26647_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:32,974 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6416d8f693154d1ca2261afad97bc58d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6416d8f693154d1ca2261afad97bc58d 2024-12-06T10:17:32,980 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into 6416d8f693154d1ca2261afad97bc58d(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:32,980 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:32,981 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=12, startTime=1733480252931; duration=0sec 2024-12-06T10:17:32,981 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:32,981 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:32,981 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:32,983 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48242 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:32,983 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:32,983 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,983 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b482c3d0b7554e35996b8da20c4dce86, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/042d1fb260e741b4a1999ed5e135fb2c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/08b016b343d2406d9172dfff65942a9f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/64030262cb0e4f13892061894630224f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=47.1 K 2024-12-06T10:17:32,984 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b482c3d0b7554e35996b8da20c4dce86, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733480247566 2024-12-06T10:17:32,984 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 042d1fb260e741b4a1999ed5e135fb2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480247603 2024-12-06T10:17:32,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 08b016b343d2406d9172dfff65942a9f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=109, earliestPutTs=1733480248239 2024-12-06T10:17:32,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 64030262cb0e4f13892061894630224f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480249387 2024-12-06T10:17:32,987 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:32,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T10:17:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:32,988 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:17:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:32,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:32,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:32,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:32,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:32,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742018_1194 (size=4469) 2024-12-06T10:17:33,000 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#168 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:33,001 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7895548bed4c48acbb7b8368b602c642 is 175, key is test_row_0/A:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:33,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e113df2ee3c24b84ad1863ba26c42d71_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480251573/Put/seqid=0 2024-12-06T10:17:33,008 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#C#compaction#170 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:33,008 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/2fc8d4f88e6441179a740b2fd7cabfe9 is 50, key is test_row_0/C:col10/1733480249387/Put/seqid=0 2024-12-06T10:17:33,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742019_1195 (size=31329) 2024-12-06T10:17:33,028 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7895548bed4c48acbb7b8368b602c642 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7895548bed4c48acbb7b8368b602c642 2024-12-06T10:17:33,034 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into 7895548bed4c48acbb7b8368b602c642(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:33,034 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:33,034 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=12, startTime=1733480252931; duration=0sec 2024-12-06T10:17:33,034 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:33,034 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:33,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742020_1196 (size=12304) 2024-12-06T10:17:33,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:33,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742021_1197 (size=12375) 2024-12-06T10:17:33,051 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e113df2ee3c24b84ad1863ba26c42d71_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e113df2ee3c24b84ad1863ba26c42d71_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:33,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7813153bf789489d9e168fc5e3dcece1, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:33,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7813153bf789489d9e168fc5e3dcece1 is 175, key is test_row_0/A:col10/1733480251573/Put/seqid=0 2024-12-06T10:17:33,061 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/2fc8d4f88e6441179a740b2fd7cabfe9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2fc8d4f88e6441179a740b2fd7cabfe9 
2024-12-06T10:17:33,068 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into 2fc8d4f88e6441179a740b2fd7cabfe9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:33,068 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:33,068 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=12, startTime=1733480252932; duration=0sec 2024-12-06T10:17:33,068 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:33,068 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:33,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742022_1198 (size=31105) 2024-12-06T10:17:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T10:17:33,478 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=147, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7813153bf789489d9e168fc5e3dcece1 2024-12-06T10:17:33,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/e50f4765b8634e0c9e29c8f6746b3187 is 50, key is test_row_0/B:col10/1733480251573/Put/seqid=0 2024-12-06T10:17:33,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742023_1199 (size=12151) 2024-12-06T10:17:33,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:33,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:33,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480313709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480313710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480313710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480313711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480313712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480313813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480313813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480313814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480313815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480313815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:33,894 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=147 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/e50f4765b8634e0c9e29c8f6746b3187 2024-12-06T10:17:33,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/dc3d1cc317ae41fb96fa0119cfed8cc3 is 50, key is test_row_0/C:col10/1733480251573/Put/seqid=0 2024-12-06T10:17:33,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742024_1200 (size=12151) 2024-12-06T10:17:33,915 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=147 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/dc3d1cc317ae41fb96fa0119cfed8cc3 2024-12-06T10:17:33,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7813153bf789489d9e168fc5e3dcece1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1 2024-12-06T10:17:33,926 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1, entries=150, sequenceid=147, filesize=30.4 K 2024-12-06T10:17:33,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/e50f4765b8634e0c9e29c8f6746b3187 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/e50f4765b8634e0c9e29c8f6746b3187 2024-12-06T10:17:33,933 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/e50f4765b8634e0c9e29c8f6746b3187, entries=150, sequenceid=147, filesize=11.9 K 2024-12-06T10:17:33,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/dc3d1cc317ae41fb96fa0119cfed8cc3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/dc3d1cc317ae41fb96fa0119cfed8cc3 2024-12-06T10:17:33,940 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/dc3d1cc317ae41fb96fa0119cfed8cc3, entries=150, sequenceid=147, filesize=11.9 K 2024-12-06T10:17:33,941 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for cba9fa4bb7ad0155608756e918c3bf01 in 953ms, sequenceid=147, compaction requested=false 2024-12-06T10:17:33,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:33,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:33,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-06T10:17:33,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-06T10:17:33,944 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-06T10:17:33,944 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7200 sec 2024-12-06T10:17:33,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.7250 sec 2024-12-06T10:17:34,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:34,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:17:34,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:34,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:34,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:34,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:34,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:34,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:34,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120689b7355a11a342f087ec0a9e22d75e79_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480253710/Put/seqid=0 2024-12-06T10:17:34,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480314031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480314032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480314033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480314033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480314034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742025_1201 (size=12304) 2024-12-06T10:17:34,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480314137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480314138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480314138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480314139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480314139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T10:17:34,326 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-06T10:17:34,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:34,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-06T10:17:34,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T10:17:34,329 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:34,329 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:34,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:34,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480314341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480314341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480314342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480314342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480314342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T10:17:34,447 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:34,452 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120689b7355a11a342f087ec0a9e22d75e79_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120689b7355a11a342f087ec0a9e22d75e79_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:34,453 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/0f6388d96f4542a5b8c27049dc9e4aaa, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:34,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/0f6388d96f4542a5b8c27049dc9e4aaa is 175, key is test_row_0/A:col10/1733480253710/Put/seqid=0 2024-12-06T10:17:34,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742026_1202 (size=31105) 2024-12-06T10:17:34,461 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/0f6388d96f4542a5b8c27049dc9e4aaa 2024-12-06T10:17:34,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/d7b3425a23bb40b586ea3832c9aec931 is 50, key is test_row_0/B:col10/1733480253710/Put/seqid=0 
2024-12-06T10:17:34,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742027_1203 (size=12151) 2024-12-06T10:17:34,481 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T10:17:34,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:34,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:34,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:34,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:34,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/d7b3425a23bb40b586ea3832c9aec931 2024-12-06T10:17:34,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/52f64a40c89d4c5492673cf0f0c7c247 is 50, key is test_row_0/C:col10/1733480253710/Put/seqid=0 2024-12-06T10:17:34,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742028_1204 (size=12151) 2024-12-06T10:17:34,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T10:17:34,634 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T10:17:34,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:34,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480314643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480314645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480314646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480314646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:34,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480314646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T10:17:34,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:34,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:34,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:34,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:34,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/52f64a40c89d4c5492673cf0f0c7c247 2024-12-06T10:17:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/0f6388d96f4542a5b8c27049dc9e4aaa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa 2024-12-06T10:17:34,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa, entries=150, sequenceid=173, filesize=30.4 K 2024-12-06T10:17:34,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/d7b3425a23bb40b586ea3832c9aec931 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d7b3425a23bb40b586ea3832c9aec931 2024-12-06T10:17:34,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d7b3425a23bb40b586ea3832c9aec931, entries=150, sequenceid=173, filesize=11.9 K 2024-12-06T10:17:34,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/52f64a40c89d4c5492673cf0f0c7c247 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/52f64a40c89d4c5492673cf0f0c7c247 2024-12-06T10:17:34,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/52f64a40c89d4c5492673cf0f0c7c247, entries=150, sequenceid=173, filesize=11.9 K 2024-12-06T10:17:34,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for cba9fa4bb7ad0155608756e918c3bf01 in 904ms, sequenceid=173, compaction requested=true 2024-12-06T10:17:34,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:34,925 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:34,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:34,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:34,926 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:34,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:34,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:34,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:34,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:34,926 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93539 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:34,926 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:34,927 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,927 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7895548bed4c48acbb7b8368b602c642, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=91.3 K 2024-12-06T10:17:34,927 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
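Column family A runs through the MOB write path in this test (DefaultMobStoreFlusher, HMobStore and DefaultMobStoreCompactor above and below), so flushed cells above the family's MOB threshold land in separate files under /mobdir while the regular HFiles keep reference cells; the threshold is evidently set very low here, since even the roughly 50 to 175 byte cells of this workload are flushed through the MOB path. A minimal sketch of declaring such a family follows; the class name and the threshold value are assumptions, not the test's actual settings.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamily {
      public static ColumnFamilyDescriptor mobFamilyA() {
        // Cells larger than the threshold are written to MOB files under /mobdir,
        // while the ordinary HFiles keep small cells and MOB reference cells.
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(4L)   // assumed: a tiny threshold so even small test cells take the MOB path
            .build();
      }
    }

Whether a particular compaction produces a new MOB file depends on the cells it actually sees, which is consistent with the compactor below creating a MOB writer and then aborting it because there are no MOB cells.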
2024-12-06T10:17:34,927 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7895548bed4c48acbb7b8368b602c642, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa] 2024-12-06T10:17:34,927 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:34,927 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:34,927 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:34,927 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6416d8f693154d1ca2261afad97bc58d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/e50f4765b8634e0c9e29c8f6746b3187, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d7b3425a23bb40b586ea3832c9aec931] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=35.8 K 2024-12-06T10:17:34,928 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7895548bed4c48acbb7b8368b602c642, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480249387 2024-12-06T10:17:34,928 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6416d8f693154d1ca2261afad97bc58d, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480249387 2024-12-06T10:17:34,928 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7813153bf789489d9e168fc5e3dcece1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733480251563 2024-12-06T10:17:34,928 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e50f4765b8634e0c9e29c8f6746b3187, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733480251563 2024-12-06T10:17:34,929 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d7b3425a23bb40b586ea3832c9aec931, 
keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480253709 2024-12-06T10:17:34,929 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f6388d96f4542a5b8c27049dc9e4aaa, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480253709 2024-12-06T10:17:34,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T10:17:34,939 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:34,940 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#176 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:34,941 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/ecdd551d849c4eef91237be3d0585d05 is 50, key is test_row_0/B:col10/1733480253710/Put/seqid=0 2024-12-06T10:17:34,941 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:34,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T10:17:34,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
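The earlier failure of pid=54 was not fatal: the master keeps the procedure alive and RSProcedureDispatcher re-sends it (10:17:34,941 above), and on this attempt the region is free to flush all three column families. The flush was presumably requested by the test through the Admin API, since the master is being polled for pid=53 while its child flush work runs as pid=54; a minimal sketch of such a request, assuming a table-wide flush and a hypothetical class name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requests a memstore flush for every region of the table; the pid=53/54
          // procedure and FlushRegionCallable traffic in this log is consistent with
          // the server side of such a request.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }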
2024-12-06T10:17:34,941 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T10:17:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:34,944 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206e8a3867500af429eb204a1a7dd512b05_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:34,946 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206e8a3867500af429eb204a1a7dd512b05_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:34,946 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e8a3867500af429eb204a1a7dd512b05_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:34,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742029_1205 (size=12527) 2024-12-06T10:17:34,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063e48cb394821491ca2297c15c4f9b28e_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480254032/Put/seqid=0 2024-12-06T10:17:34,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742030_1206 (size=4469) 2024-12-06T10:17:34,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37093 is added to blk_1073742031_1207 (size=12304) 2024-12-06T10:17:34,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:34,979 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063e48cb394821491ca2297c15c4f9b28e_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063e48cb394821491ca2297c15c4f9b28e_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:34,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d578d4c3d0d24f3b876898bd5904bcda, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:34,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d578d4c3d0d24f3b876898bd5904bcda is 175, key is test_row_0/A:col10/1733480254032/Put/seqid=0 2024-12-06T10:17:34,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742032_1208 (size=31105) 2024-12-06T10:17:34,991 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=186, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d578d4c3d0d24f3b876898bd5904bcda 2024-12-06T10:17:35,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/b260896510a44fa5a5aec4232637e3ce is 50, key is test_row_0/B:col10/1733480254032/Put/seqid=0 2024-12-06T10:17:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742033_1209 (size=12151) 2024-12-06T10:17:35,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:35,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:35,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480315168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480315168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480315169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480315169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480315170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480315271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480315272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480315272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480315272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480315272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,361 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/ecdd551d849c4eef91237be3d0585d05 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/ecdd551d849c4eef91237be3d0585d05 2024-12-06T10:17:35,367 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into ecdd551d849c4eef91237be3d0585d05(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
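Each store now holds three HFiles from the last three flushes, which is exactly the minimum the compaction policy needs, so A, B and C are all queued for minor compaction and merged back into a single file per store, throttled by the PressureAwareThroughputController at its 50.00 MB/second limit. The knobs behind the "3 eligible, 16 blocking" numbers in the selection messages are the standard per-store compaction settings; the sketch below only restates the values implied by the log (they match the shipped defaults), under a hypothetical class name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // A store becomes eligible for minor compaction once it has at least this
        // many HFiles ("3 eligible" in the selection messages above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files merged in one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // New writes are delayed once a store accumulates this many files
        // ("16 blocking" in the selection messages).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }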
2024-12-06T10:17:35,367 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:35,367 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480254926; duration=0sec 2024-12-06T10:17:35,368 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:35,368 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:35,368 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:35,369 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:35,369 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:35,369 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:35,370 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2fc8d4f88e6441179a740b2fd7cabfe9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/dc3d1cc317ae41fb96fa0119cfed8cc3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/52f64a40c89d4c5492673cf0f0c7c247] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=35.8 K 2024-12-06T10:17:35,370 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fc8d4f88e6441179a740b2fd7cabfe9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733480249387 2024-12-06T10:17:35,371 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting dc3d1cc317ae41fb96fa0119cfed8cc3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733480251563 2024-12-06T10:17:35,371 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#177 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:35,372 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7699a17437c248c1b7fce57023e8ae33 is 175, key is test_row_0/A:col10/1733480253710/Put/seqid=0 2024-12-06T10:17:35,372 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 52f64a40c89d4c5492673cf0f0c7c247, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480253709 2024-12-06T10:17:35,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742034_1210 (size=31481) 2024-12-06T10:17:35,384 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#C#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:35,385 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/fb88e4b85e2947cbb307758b80d36890 is 50, key is test_row_0/C:col10/1733480253710/Put/seqid=0 2024-12-06T10:17:35,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742035_1211 (size=12527) 2024-12-06T10:17:35,399 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7699a17437c248c1b7fce57023e8ae33 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7699a17437c248c1b7fce57023e8ae33 2024-12-06T10:17:35,407 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/fb88e4b85e2947cbb307758b80d36890 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/fb88e4b85e2947cbb307758b80d36890 2024-12-06T10:17:35,407 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into 7699a17437c248c1b7fce57023e8ae33(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
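All of this churn (flushes, minor compactions, MOB rewrites, RegionTooBusyException back-pressure) is the background noise the ACID test is built to survive: each logical update is issued as a single Put spanning families A, B and C, and HBase applies a single mutation to a row atomically even across column families, so concurrent readers must never observe the families disagreeing. A minimal sketch of such a multi-family write, with a hypothetical class name and payload; the row key, families and qualifier are the ones appearing in the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicRowWriter {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] value = Bytes.toBytes(System.currentTimeMillis()); // hypothetical payload
          // A single Put is applied atomically to the row, even though it spans the
          // three column families whose flushes and compactions interleave above.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          table.put(put);
        }
      }
    }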
2024-12-06T10:17:35,407 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:35,407 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480254925; duration=0sec 2024-12-06T10:17:35,408 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:35,408 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:35,414 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into fb88e4b85e2947cbb307758b80d36890(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:35,414 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:35,414 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480254926; duration=0sec 2024-12-06T10:17:35,414 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:35,414 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:35,416 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/b260896510a44fa5a5aec4232637e3ce 2024-12-06T10:17:35,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/098cd0e195024eeda001cbb81079ff8c is 50, key is test_row_0/C:col10/1733480254032/Put/seqid=0 2024-12-06T10:17:35,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742036_1212 (size=12151) 2024-12-06T10:17:35,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T10:17:35,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480315474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480315474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480315475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480315475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480315475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480315779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480315779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480315780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480315781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:35,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480315788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:35,830 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/098cd0e195024eeda001cbb81079ff8c 2024-12-06T10:17:35,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d578d4c3d0d24f3b876898bd5904bcda as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda 2024-12-06T10:17:35,845 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda, entries=150, sequenceid=186, filesize=30.4 K 2024-12-06T10:17:35,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/b260896510a44fa5a5aec4232637e3ce as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b260896510a44fa5a5aec4232637e3ce 2024-12-06T10:17:35,853 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b260896510a44fa5a5aec4232637e3ce, entries=150, sequenceid=186, filesize=11.9 K 2024-12-06T10:17:35,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/098cd0e195024eeda001cbb81079ff8c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/098cd0e195024eeda001cbb81079ff8c 2024-12-06T10:17:35,860 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/098cd0e195024eeda001cbb81079ff8c, entries=150, sequenceid=186, filesize=11.9 K 2024-12-06T10:17:35,861 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cba9fa4bb7ad0155608756e918c3bf01 in 920ms, sequenceid=186, compaction requested=false 2024-12-06T10:17:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-06T10:17:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-06T10:17:35,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-06T10:17:35,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5340 sec 2024-12-06T10:17:35,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.5380 sec 2024-12-06T10:17:36,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:36,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:17:36,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:36,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480316293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480316295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480316296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480316296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480316296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069a6dd467b2fd4ae08fcdf57ff74497eb_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:36,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742037_1213 (size=12304) 2024-12-06T10:17:36,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480316398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480316397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480316399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480316400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480316400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T10:17:36,435 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-06T10:17:36,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-06T10:17:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T10:17:36,439 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:36,440 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:36,440 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:36,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T10:17:36,592 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:36,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:36,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:36,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:36,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:36,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480316601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480316601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480316601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480316602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480316603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,727 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:36,736 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069a6dd467b2fd4ae08fcdf57ff74497eb_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069a6dd467b2fd4ae08fcdf57ff74497eb_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:36,737 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/6a273b33fe5a490f9007c4b1f7e299d7, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:36,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/6a273b33fe5a490f9007c4b1f7e299d7 is 175, key is test_row_0/A:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:36,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T10:17:36,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added 
to blk_1073742038_1214 (size=31105) 2024-12-06T10:17:36,745 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:36,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:36,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:36,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:36,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:36,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,899 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:36,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:36,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:36,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:36,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:36,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480316906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480316906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480316908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480316908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:36,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:36,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480316908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T10:17:37,052 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:37,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:37,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,144 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/6a273b33fe5a490f9007c4b1f7e299d7 2024-12-06T10:17:37,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/50ec457f2e8b41489055ad088bdf0737 is 50, key is test_row_0/B:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:37,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742039_1215 (size=12151) 2024-12-06T10:17:37,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/50ec457f2e8b41489055ad088bdf0737 2024-12-06T10:17:37,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/2e78db90320c4cfa88e1cc2e912533dd is 50, key is test_row_0/C:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:37,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742040_1216 (size=12151) 2024-12-06T10:17:37,204 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 
2024-12-06T10:17:37,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:37,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:37,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:37,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,358 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:37,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:37,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,359 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:37,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480317411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:37,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480317411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:37,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480317411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:37,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480317413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:37,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480317414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,511 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:37,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:37,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:37,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T10:17:37,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/2e78db90320c4cfa88e1cc2e912533dd 2024-12-06T10:17:37,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/6a273b33fe5a490f9007c4b1f7e299d7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7 2024-12-06T10:17:37,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7, entries=150, sequenceid=214, filesize=30.4 K 2024-12-06T10:17:37,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/50ec457f2e8b41489055ad088bdf0737 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/50ec457f2e8b41489055ad088bdf0737 2024-12-06T10:17:37,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/50ec457f2e8b41489055ad088bdf0737, entries=150, sequenceid=214, filesize=11.9 K 2024-12-06T10:17:37,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/2e78db90320c4cfa88e1cc2e912533dd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2e78db90320c4cfa88e1cc2e912533dd 2024-12-06T10:17:37,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2e78db90320c4cfa88e1cc2e912533dd, entries=150, sequenceid=214, filesize=11.9 K 2024-12-06T10:17:37,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cba9fa4bb7ad0155608756e918c3bf01 in 1311ms, sequenceid=214, compaction requested=true 2024-12-06T10:17:37,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:37,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:37,597 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:37,597 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:37,598 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:37,598 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 93691 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:37,598 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:37,598 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:37,598 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,598 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,598 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/ecdd551d849c4eef91237be3d0585d05, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b260896510a44fa5a5aec4232637e3ce, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/50ec457f2e8b41489055ad088bdf0737] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.0 K 2024-12-06T10:17:37,598 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7699a17437c248c1b7fce57023e8ae33, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=91.5 K 2024-12-06T10:17:37,598 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,598 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7699a17437c248c1b7fce57023e8ae33, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7] 2024-12-06T10:17:37,598 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ecdd551d849c4eef91237be3d0585d05, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480253709 2024-12-06T10:17:37,599 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7699a17437c248c1b7fce57023e8ae33, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480253709 2024-12-06T10:17:37,599 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b260896510a44fa5a5aec4232637e3ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733480254030 2024-12-06T10:17:37,599 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting d578d4c3d0d24f3b876898bd5904bcda, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733480254030 2024-12-06T10:17:37,599 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 50ec457f2e8b41489055ad088bdf0737, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480255169 2024-12-06T10:17:37,599 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a273b33fe5a490f9007c4b1f7e299d7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480255169 2024-12-06T10:17:37,606 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:37,607 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#185 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:37,608 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/f86afad75dff4bbc8537aa2bf3f6baed is 50, key is test_row_0/B:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:37,608 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206a0f5c5c598d54a78b49854dbb7df8df4_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:37,610 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206a0f5c5c598d54a78b49854dbb7df8df4_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:37,610 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a0f5c5c598d54a78b49854dbb7df8df4_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:37,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742041_1217 (size=12629) 2024-12-06T10:17:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742042_1218 (size=4469) 2024-12-06T10:17:37,629 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/f86afad75dff4bbc8537aa2bf3f6baed as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f86afad75dff4bbc8537aa2bf3f6baed 2024-12-06T10:17:37,634 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into f86afad75dff4bbc8537aa2bf3f6baed(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:37,634 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:37,635 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480257597; duration=0sec 2024-12-06T10:17:37,635 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:37,635 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:37,635 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:37,637 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:37,637 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:37,637 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,637 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/fb88e4b85e2947cbb307758b80d36890, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/098cd0e195024eeda001cbb81079ff8c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2e78db90320c4cfa88e1cc2e912533dd] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.0 K 2024-12-06T10:17:37,638 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting fb88e4b85e2947cbb307758b80d36890, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480253709 2024-12-06T10:17:37,638 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 098cd0e195024eeda001cbb81079ff8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733480254030 2024-12-06T10:17:37,638 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e78db90320c4cfa88e1cc2e912533dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480255169 2024-12-06T10:17:37,650 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cba9fa4bb7ad0155608756e918c3bf01#C#compaction#187 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:37,651 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/426728a358734bdf8fcb3b20f536e6b2 is 50, key is test_row_0/C:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:37,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:37,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T10:17:37,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:37,666 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:37,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:37,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:37,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:37,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:37,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:37,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:37,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742043_1219 (size=12629) 2024-12-06T10:17:37,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b76df7908c8e40a9a15fad81a10e27bc_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480256295/Put/seqid=0 2024-12-06T10:17:37,690 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/426728a358734bdf8fcb3b20f536e6b2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/426728a358734bdf8fcb3b20f536e6b2 2024-12-06T10:17:37,696 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into 426728a358734bdf8fcb3b20f536e6b2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:37,696 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:37,696 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480257597; duration=0sec 2024-12-06T10:17:37,696 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:37,696 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:37,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742044_1220 (size=12304) 2024-12-06T10:17:37,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:37,705 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b76df7908c8e40a9a15fad81a10e27bc_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b76df7908c8e40a9a15fad81a10e27bc_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:37,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c201f29c244c45fd99554f2be8ff45bc, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:37,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c201f29c244c45fd99554f2be8ff45bc is 175, key 
is test_row_0/A:col10/1733480256295/Put/seqid=0 2024-12-06T10:17:37,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742045_1221 (size=31105) 2024-12-06T10:17:38,025 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#186 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:38,026 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/bc8ae7dc610d4b2d8d534187ed89f250 is 175, key is test_row_0/A:col10/1733480255169/Put/seqid=0 2024-12-06T10:17:38,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742046_1222 (size=31583) 2024-12-06T10:17:38,042 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/bc8ae7dc610d4b2d8d534187ed89f250 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/bc8ae7dc610d4b2d8d534187ed89f250 2024-12-06T10:17:38,048 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into bc8ae7dc610d4b2d8d534187ed89f250(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:38,048 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:38,048 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480257596; duration=0sec 2024-12-06T10:17:38,048 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:38,048 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:38,113 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=226, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c201f29c244c45fd99554f2be8ff45bc 2024-12-06T10:17:38,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/200d4195e41746878612d51bb14f49f2 is 50, key is test_row_0/B:col10/1733480256295/Put/seqid=0 2024-12-06T10:17:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742047_1223 (size=12151) 2024-12-06T10:17:38,136 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/200d4195e41746878612d51bb14f49f2 2024-12-06T10:17:38,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/064a9cca4f9642d080ddc1c3b6ea6aa3 is 50, key is test_row_0/C:col10/1733480256295/Put/seqid=0 2024-12-06T10:17:38,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742048_1224 (size=12151) 2024-12-06T10:17:38,157 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/064a9cca4f9642d080ddc1c3b6ea6aa3 2024-12-06T10:17:38,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c201f29c244c45fd99554f2be8ff45bc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc 2024-12-06T10:17:38,168 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc, entries=150, sequenceid=226, filesize=30.4 K 2024-12-06T10:17:38,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/200d4195e41746878612d51bb14f49f2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/200d4195e41746878612d51bb14f49f2 2024-12-06T10:17:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,174 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/200d4195e41746878612d51bb14f49f2, entries=150, sequenceid=226, filesize=11.9 K 2024-12-06T10:17:38,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/064a9cca4f9642d080ddc1c3b6ea6aa3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/064a9cca4f9642d080ddc1c3b6ea6aa3 2024-12-06T10:17:38,179 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/064a9cca4f9642d080ddc1c3b6ea6aa3, entries=150, sequenceid=226, filesize=11.9 K 2024-12-06T10:17:38,180 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] 
regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for cba9fa4bb7ad0155608756e918c3bf01 in 514ms, sequenceid=226, compaction requested=false 2024-12-06T10:17:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:38,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-06T10:17:38,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-06T10:17:38,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-06T10:17:38,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7410 sec 2024-12-06T10:17:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.7470 sec 2024-12-06T10:17:38,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... repeated DEBUG entries condensed: "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", logged continuously from 2024-12-06T10:17:38,299 to 2024-12-06T10:17:38,400 by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=33397) ...] 2024-12-06T10:17:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:38,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:38,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:38,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:38,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:38,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
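A minimal sketch, under stated assumptions, of where the repeated StoreFileTrackerFactory entries above come from: each RPC handler resolves the configured store file tracker, and DefaultStoreFileTracker is what an unset (or "DEFAULT") tracker setting resolves to. The configuration key name and the "DEFAULT" value used here are assumptions inferred from the factory and class names in the log, not taken from this test's actual site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Assumed key: "hbase.store.file-tracker.impl". Leaving it unset, or pinning it to
    // "DEFAULT", selects DefaultStoreFileTracker -- the implementation every RPC handler
    // thread above reports instantiating.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("store file tracker impl: " + conf.get("hbase.store.file-tracker.impl"));
  }
}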
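A minimal client-side sketch, under stated assumptions, of the activity recorded from this point on: the region's memstore has filled and is being flushed, writes are pushed back with RegionTooBusyException while the blocking limit is exceeded, and a table flush is later submitted through the master as a FlushTableProcedure. The connection settings, retry count, backoff, and row/value below are illustrative assumptions, not the test harness's actual driver code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndBackpressureSketch {
  public static void main(String[] args) throws Exception {
    TableName tableName = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tableName)) {

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // While the memstore is over its blocking limit the server rejects writes with
      // RegionTooBusyException (the WARN entries below). With client retries disabled it
      // surfaces directly as caught here; with default settings it may instead arrive
      // wrapped in a retries-exhausted exception.
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(100L * attempt); // simple linear backoff before retrying
        }
      }

      // Ask the master to flush the table. This is the kind of call that shows up later as
      // "flush TestAcidGuarantees" and a FlushTableProcedure with per-region
      // FlushRegionProcedure subprocedures.
      admin.flush(tableName);
    }
  }
}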
2024-12-06T10:17:38,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:38,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:38,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068a4474191c7e4a1d8f1de21f51977996_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:38,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480318533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480318536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480318536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480318537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480318538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742050_1226 (size=27248) 2024-12-06T10:17:38,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T10:17:38,544 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-06T10:17:38,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:38,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-06T10:17:38,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T10:17:38,565 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:38,566 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:38,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:38,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480318639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480318642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480318642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480318642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480318643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T10:17:38,719 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:38,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
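The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit (512.0 K here) and a flush is still pending. The HBase client normally retries such transient errors internally; the sketch below only illustrates the back-off pattern if the exception does surface to application code. The table name is taken from the log, while the row, family, retry count, and back-off values are assumptions, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;            // assumed starting back-off
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);              // may fail while the region is over its memstore limit
          break;                       // write accepted
        } catch (RegionTooBusyException e) {
          // Region is above its blocking memstore size; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;              // simple exponential back-off; gives up after 5 attempts
        }
      }
    }
  }
}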
2024-12-06T10:17:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:38,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:38,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480318843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480318845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480318845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480318846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480318847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T10:17:38,872 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:38,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:38,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
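The pid=58 failures in this stretch are the master's FlushRegionProcedure being re-dispatched while the region server is still running an earlier memstore flush: FlushRegionCallable finds the region "already flushing", throws IOException "Unable to complete flush", and the master logs "Remote procedure failed" and retries. On the client side the whole sequence is normally started by a single admin flush call, as in the minimal sketch below (the table name comes from the log; the rest is an assumed setup, not the test's actual code).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush-table procedure (pid=57/58 in the log);
      // the call returns once the master reports the procedure done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}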
2024-12-06T10:17:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:38,945 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:38,950 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068a4474191c7e4a1d8f1de21f51977996_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068a4474191c7e4a1d8f1de21f51977996_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:38,952 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d6f6a7ca88ea4495a9cbdd21fc7bca39, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:38,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d6f6a7ca88ea4495a9cbdd21fc7bca39 is 175, key is test_row_0/A:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:38,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742049_1225 (size=83034) 2024-12-06T10:17:38,965 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d6f6a7ca88ea4495a9cbdd21fc7bca39 2024-12-06T10:17:38,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/320c23b6ccc04d07b04f0ca0e67df7b2 is 50, key is test_row_0/B:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:39,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:39,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742051_1227 (size=12151) 2024-12-06T10:17:39,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/320c23b6ccc04d07b04f0ca0e67df7b2 2024-12-06T10:17:39,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/4b9ddee9ebed4d96b6d56c51cb01dbf0 is 50, key is test_row_0/C:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:39,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742052_1228 (size=12151) 2024-12-06T10:17:39,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480319149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480319149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480319149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480319150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480319150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T10:17:39,179 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:39,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:39,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/4b9ddee9ebed4d96b6d56c51cb01dbf0 2024-12-06T10:17:39,486 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:39,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:39,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
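The cycle above — "Executing remote procedure class ... FlushRegionCallable, pid=58", then "NOT flushing ... as already flushing", then "Unable to complete flush" reported back to the master — is the master's flush procedure being re-dispatched to the region server while the region's own MemStoreFlusher is still mid-flush; each attempt fails and the dispatcher retries until the in-flight flush completes. A minimal sketch of the admin-side call that schedules such a FLUSH_REGIONS procedure, assuming a reachable cluster and using the table name from this log (the configuration source is an assumption):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: asking the master to flush a table schedules a flush procedure like
// pid=58 above. If the target region is already flushing, the region-server
// callable fails with "Unable to complete flush" and the master retries, which
// is the loop visible in this log.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // table name taken from the log
    }
  }
}
```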
2024-12-06T10:17:39,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:39,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d6f6a7ca88ea4495a9cbdd21fc7bca39 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39 2024-12-06T10:17:39,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39, entries=450, sequenceid=240, filesize=81.1 K 2024-12-06T10:17:39,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/320c23b6ccc04d07b04f0ca0e67df7b2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/320c23b6ccc04d07b04f0ca0e67df7b2 2024-12-06T10:17:39,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/320c23b6ccc04d07b04f0ca0e67df7b2, entries=150, sequenceid=240, filesize=11.9 K 2024-12-06T10:17:39,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/4b9ddee9ebed4d96b6d56c51cb01dbf0 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4b9ddee9ebed4d96b6d56c51cb01dbf0 2024-12-06T10:17:39,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4b9ddee9ebed4d96b6d56c51cb01dbf0, entries=150, sequenceid=240, filesize=11.9 K 2024-12-06T10:17:39,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cba9fa4bb7ad0155608756e918c3bf01 in 1023ms, sequenceid=240, compaction requested=true 2024-12-06T10:17:39,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:39,525 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:39,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:39,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:39,526 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:39,527 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 145722 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:39,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:39,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:39,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:39,527 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:39,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:39,527 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
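The selection logged above can be cross-checked against the per-file sizes in the Compactor(224) lines that follow: for store A the policy picked 3 files totalling 145722 bytes, and the three HFiles (30.8 K + 30.4 K + 81.1 K) sum to the 142.3 K totalSize it reports. A throwaway sketch of that arithmetic, with the values copied from the log (nothing here is an HBase API):

```java
// Cross-check of the compaction selection sizes logged for store A.
public class CompactionSizeCheck {
  public static void main(String[] args) {
    long selectedBytes = 145722L;              // "selected 3 files of size 145722"
    double[] fileKiB = {30.8, 30.4, 81.1};     // bc8ae7dc..., c201f29c..., d6f6a7ca...
    double sumKiB = 0;
    for (double k : fileKiB) {
      sumKiB += k;
    }
    System.out.printf("sum of selected files = %.1f K%n", sumKiB);                 // 142.3 K
    System.out.printf("policy-reported size  = %.1f K%n", selectedBytes / 1024.0); // ~142.3 K
  }
}
```

The same check works for store B further down: 12.3 K + 11.9 K + 11.9 K = 36.1 K, matching the 36931-byte selection.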
2024-12-06T10:17:39,527 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/bc8ae7dc610d4b2d8d534187ed89f250, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=142.3 K 2024-12-06T10:17:39,527 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,527 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/bc8ae7dc610d4b2d8d534187ed89f250, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39] 2024-12-06T10:17:39,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc8ae7dc610d4b2d8d534187ed89f250, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480255169 2024-12-06T10:17:39,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c201f29c244c45fd99554f2be8ff45bc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733480256294 2024-12-06T10:17:39,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:39,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:39,529 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
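The A-store compaction above runs under DefaultCompactionThroughputController with maxThroughput=50.00 MB/second; the "average throughput is X MB/second, slept N time(s)" lines that follow are that controller reporting how much it had to slow the writer down (here: not at all). The sketch below is a deliberately simplified illustration of sleep-based throttling, not HBase's PressureAwareThroughputController, and every name in it is made up:

```java
// Simplified sleep-based write throttling: after each chunk, compare the achieved
// rate with the configured limit and sleep off any surplus. HBase's
// PressureAwareThroughputController additionally adjusts the limit with flush
// pressure, but the basic mechanism is the same.
public class SimpleThroughputLimiter {
  private final double maxBytesPerSec;
  private final long startNanos = System.nanoTime();
  private long totalBytes;
  private long sleptMs;

  public SimpleThroughputLimiter(double maxBytesPerSec) {
    this.maxBytesPerSec = maxBytesPerSec;
  }

  /** Call after writing {@code bytes}; sleeps if we are ahead of the allowed rate. */
  public void control(long bytes) throws InterruptedException {
    totalBytes += bytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minSecAllowed = totalBytes / maxBytesPerSec; // time the limit requires for this many bytes
    long sleepMs = (long) ((minSecAllowed - elapsedSec) * 1000);
    if (sleepMs > 0) {
      sleptMs += sleepMs;
      Thread.sleep(sleepMs);
    }
  }

  public String report() {
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    return String.format("average throughput is %.2f MB/second, total slept time is %d ms",
        totalBytes / elapsedSec / (1024 * 1024), sleptMs);
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // 50 MB/s, as in the log
    byte[] chunk = new byte[1024 * 1024];
    for (int i = 0; i < 20; i++) {      // pretend to write 20 x 1 MB chunks
      limiter.control(chunk.length);
    }
    System.out.println(limiter.report());
  }
}
```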
2024-12-06T10:17:39,529 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f86afad75dff4bbc8537aa2bf3f6baed, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/200d4195e41746878612d51bb14f49f2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/320c23b6ccc04d07b04f0ca0e67df7b2] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.1 K 2024-12-06T10:17:39,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f86afad75dff4bbc8537aa2bf3f6baed, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480255169 2024-12-06T10:17:39,530 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6f6a7ca88ea4495a9cbdd21fc7bca39, keycount=450, bloomtype=ROW, size=81.1 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733480258488 2024-12-06T10:17:39,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 200d4195e41746878612d51bb14f49f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733480256294 2024-12-06T10:17:39,531 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 320c23b6ccc04d07b04f0ca0e67df7b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733480258498 2024-12-06T10:17:39,550 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#194 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:39,551 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/a631ae2750414318af30c4bee5c4cb4f is 50, key is test_row_0/B:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:39,552 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:39,555 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120604cdfcad95a1409bb65ccdb1e2676be8_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:39,558 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120604cdfcad95a1409bb65ccdb1e2676be8_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:39,558 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120604cdfcad95a1409bb65ccdb1e2676be8_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:39,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742053_1229 (size=4469) 2024-12-06T10:17:39,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742054_1230 (size=12731) 2024-12-06T10:17:39,602 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/a631ae2750414318af30c4bee5c4cb4f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/a631ae2750414318af30c4bee5c4cb4f 2024-12-06T10:17:39,610 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into a631ae2750414318af30c4bee5c4cb4f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
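The MOB writer that is created and immediately aborted above ("because there are no MOB cells") is named d41d8cd98f00b204e9800998ecf8427e20241206..._cba9fa4bb7ad0155608756e918c3bf01. Reading that name as md5(start key) + date + writer id + "_" + encoded region name is an inference from this log rather than something it states, but the 32-hex-character prefix does equal the MD5 of an empty input, which is consistent with this region's STARTKEY => ''. A quick check of that constant:

```java
import java.math.BigInteger;
import java.security.MessageDigest;

// Confirms that the prefix of the MOB writer name above is the MD5 of an empty
// byte array, i.e. of this region's empty start key.
public class EmptyKeyMd5 {
  public static void main(String[] args) throws Exception {
    byte[] digest = MessageDigest.getInstance("MD5").digest(new byte[0]);
    String hex = String.format("%032x", new BigInteger(1, digest));
    System.out.println(hex);                                            // d41d8cd98f00b204e9800998ecf8427e
    System.out.println(hex.equals("d41d8cd98f00b204e9800998ecf8427e")); // true
  }
}
```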
2024-12-06T10:17:39,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:39,611 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480259526; duration=0sec 2024-12-06T10:17:39,611 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:39,611 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:39,611 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:39,613 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:39,613 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:39,613 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,614 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/426728a358734bdf8fcb3b20f536e6b2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/064a9cca4f9642d080ddc1c3b6ea6aa3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4b9ddee9ebed4d96b6d56c51cb01dbf0] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.1 K 2024-12-06T10:17:39,614 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 426728a358734bdf8fcb3b20f536e6b2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480255169 2024-12-06T10:17:39,615 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 064a9cca4f9642d080ddc1c3b6ea6aa3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733480256294 2024-12-06T10:17:39,615 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b9ddee9ebed4d96b6d56c51cb01dbf0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733480258498 2024-12-06T10:17:39,626 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cba9fa4bb7ad0155608756e918c3bf01#C#compaction#196 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:39,627 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/5c37337a8c944e2393acb65e187c697f is 50, key is test_row_0/C:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:39,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742055_1231 (size=12731) 2024-12-06T10:17:39,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T10:17:39,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:39,640 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:17:39,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:39,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:39,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:39,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:39,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:39,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:39,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e43f76e2dee144fd98e9f8b53f2db1a1_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480258537/Put/seqid=0 2024-12-06T10:17:39,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:39,657 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:39,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742056_1232 (size=12404) 2024-12-06T10:17:39,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:39,663 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e43f76e2dee144fd98e9f8b53f2db1a1_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e43f76e2dee144fd98e9f8b53f2db1a1_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:39,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/143a0bccaa4e4452b2b8336368a90149, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/143a0bccaa4e4452b2b8336368a90149 is 175, key is test_row_0/A:col10/1733480258537/Put/seqid=0 2024-12-06T10:17:39,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480319663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480319663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480319665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480319666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T10:17:39,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742057_1233 (size=31205) 2024-12-06T10:17:39,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480319666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,670 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=265, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/143a0bccaa4e4452b2b8336368a90149 2024-12-06T10:17:39,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/5446ed762a054e5ea7c2051ba8781db9 is 50, key is test_row_0/B:col10/1733480258537/Put/seqid=0 2024-12-06T10:17:39,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742058_1234 (size=12251) 2024-12-06T10:17:39,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480319768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480319768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480319769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480319770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480319771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480319970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480319971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480319974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480319975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480319975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:39,989 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#195 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:39,990 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/f8ef894924c14f5bbc993c44512aa2bb is 175, key is test_row_0/A:col10/1733480258500/Put/seqid=0 2024-12-06T10:17:40,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742059_1235 (size=31685) 2024-12-06T10:17:40,013 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/f8ef894924c14f5bbc993c44512aa2bb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/f8ef894924c14f5bbc993c44512aa2bb 2024-12-06T10:17:40,018 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into f8ef894924c14f5bbc993c44512aa2bb(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:40,018 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:40,018 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480259525; duration=0sec 2024-12-06T10:17:40,018 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:40,018 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:40,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/5c37337a8c944e2393acb65e187c697f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/5c37337a8c944e2393acb65e187c697f 2024-12-06T10:17:40,045 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into 5c37337a8c944e2393acb65e187c697f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:40,045 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:40,045 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480259527; duration=0sec 2024-12-06T10:17:40,045 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:40,045 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:40,098 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/5446ed762a054e5ea7c2051ba8781db9 2024-12-06T10:17:40,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/75fc84dcd0f14ac89a26b1757d99d447 is 50, key is test_row_0/C:col10/1733480258537/Put/seqid=0 2024-12-06T10:17:40,130 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742060_1236 (size=12251) 2024-12-06T10:17:40,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480320272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480320276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480320276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480320277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480320277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,531 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/75fc84dcd0f14ac89a26b1757d99d447 2024-12-06T10:17:40,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/143a0bccaa4e4452b2b8336368a90149 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149 2024-12-06T10:17:40,544 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149, entries=150, sequenceid=265, filesize=30.5 K 2024-12-06T10:17:40,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/5446ed762a054e5ea7c2051ba8781db9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/5446ed762a054e5ea7c2051ba8781db9 2024-12-06T10:17:40,550 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/5446ed762a054e5ea7c2051ba8781db9, entries=150, sequenceid=265, filesize=12.0 K 2024-12-06T10:17:40,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/75fc84dcd0f14ac89a26b1757d99d447 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/75fc84dcd0f14ac89a26b1757d99d447 2024-12-06T10:17:40,557 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/75fc84dcd0f14ac89a26b1757d99d447, entries=150, sequenceid=265, filesize=12.0 K 2024-12-06T10:17:40,558 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for cba9fa4bb7ad0155608756e918c3bf01 in 918ms, sequenceid=265, compaction requested=false 2024-12-06T10:17:40,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:40,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:40,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-06T10:17:40,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-06T10:17:40,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-06T10:17:40,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9930 sec 2024-12-06T10:17:40,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.9980 sec 2024-12-06T10:17:40,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T10:17:40,670 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-06T10:17:40,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-06T10:17:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T10:17:40,673 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:40,673 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:40,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T10:17:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:40,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T10:17:40,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:40,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:40,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, 
store=B 2024-12-06T10:17:40,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:40,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:40,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:40,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067dc26658fb9b463da0fe0f762f3d5751_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:40,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742061_1237 (size=12454) 2024-12-06T10:17:40,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480320798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480320801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480320801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480320802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480320803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:40,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:40,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:40,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:40,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:40,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:40,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:40,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480320904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480320906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480320906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480320907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480320907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T10:17:40,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:40,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:40,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:40,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:40,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:40,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:40,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480321108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480321109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480321109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480321110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480321111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,130 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:41,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:41,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
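Editor's note (not part of the log): the repeated RegionTooBusyException entries above mean the region is rejecting new Mutate calls while its memstore is over the blocking limit and a flush is still in flight. The stock HBase client already retries such calls internally, so the standalone sketch below is only an illustration of the back-off idea; the table name and row reuse identifiers from the log, but the retry counts and sleep values are assumptions, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                       // illustrative starting back-off
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                         // may be rejected while the memstore is over its blocking limit
          break;                                  // write accepted, stop retrying
        } catch (RegionTooBusyException e) {
          // The region blocks new writes until the in-flight flush completes;
          // back off instead of hammering the same RPC handler threads.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}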
2024-12-06T10:17:41,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,193 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:41,197 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067dc26658fb9b463da0fe0f762f3d5751_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067dc26658fb9b463da0fe0f762f3d5751_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:41,199 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d339be6ae5da4a43811ec2af2806d36c, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:41,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d339be6ae5da4a43811ec2af2806d36c is 175, key is test_row_0/A:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:41,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742062_1238 (size=31255) 2024-12-06T10:17:41,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T10:17:41,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:41,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:41,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
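Editor's note (not part of the log): the "Over memstore limit=512.0 K" figure seen throughout is the per-region blocking threshold, conventionally derived as the memstore flush size multiplied by the block multiplier. The property names in the sketch below are the standard HBase settings; the concrete values are assumptions chosen only so the arithmetic reproduces the 512 K figure, not the configuration this test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-sized values that would yield the 512 K limit in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K flush trigger (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                    // 512 K with the values above
    // While the region's memstore is above this limit, puts fail with RegionTooBusyException
    // and the master keeps re-dispatching the flush procedure (pid=60 here) until the
    // in-progress flush finishes and the size drops back below the threshold.
    System.out.println("Writes block above " + blockingLimit + " bytes");
  }
}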
2024-12-06T10:17:41,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480321412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480321412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480321413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480321414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480321415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,436 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:41,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:41,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:41,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,605 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d339be6ae5da4a43811ec2af2806d36c 2024-12-06T10:17:41,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6ed9a1d06f1144b9b703079c14e8971b is 50, key is test_row_0/B:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:41,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742063_1239 (size=12301) 2024-12-06T10:17:41,751 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:41,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:41,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T10:17:41,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:41,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:41,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:41,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:41,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480321914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480321916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480321919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480321919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:41,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480321920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:42,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6ed9a1d06f1144b9b703079c14e8971b 2024-12-06T10:17:42,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/b90b1ea1929546c5b7c82b7dc942fe2e is 50, key is test_row_0/C:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:42,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742064_1240 (size=12301) 2024-12-06T10:17:42,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/b90b1ea1929546c5b7c82b7dc942fe2e 2024-12-06T10:17:42,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/d339be6ae5da4a43811ec2af2806d36c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c 2024-12-06T10:17:42,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c, entries=150, sequenceid=280, filesize=30.5 K 2024-12-06T10:17:42,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6ed9a1d06f1144b9b703079c14e8971b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6ed9a1d06f1144b9b703079c14e8971b 2024-12-06T10:17:42,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6ed9a1d06f1144b9b703079c14e8971b, entries=150, sequenceid=280, filesize=12.0 K 2024-12-06T10:17:42,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/b90b1ea1929546c5b7c82b7dc942fe2e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b90b1ea1929546c5b7c82b7dc942fe2e 2024-12-06T10:17:42,059 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:42,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:42,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:42,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:42,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:42,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:42,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:42,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:42,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b90b1ea1929546c5b7c82b7dc942fe2e, entries=150, sequenceid=280, filesize=12.0 K 2024-12-06T10:17:42,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cba9fa4bb7ad0155608756e918c3bf01 in 1291ms, sequenceid=280, compaction requested=true 2024-12-06T10:17:42,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:42,069 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:42,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:42,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:42,070 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:42,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:42,071 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94145 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:42,071 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:42,071 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
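The RegionTooBusyException warnings above are the region server refusing writes while the memstore of region cba9fa4bb7ad0155608756e918c3bf01 sits over its blocking limit (512.0 K here, consistent with the deliberately small flush size this test runs with; on a stock deployment the blocking threshold is typically derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). Purely as a sketch, and not code that appears in this test, a client that wants to handle the exception itself rather than lean on the built-in client retries could back off like this; the cell value, retry count, and sleep interval are assumptions:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Row, family and qualifier are taken from the log entries above; the value is made up.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
          for (int attempt = 1; attempt <= 5; attempt++) {   // bounded retries (assumption)
            try {
              table.put(put);                                // write accepted by the region server
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(100L * attempt);                  // simple linear backoff (assumption)
            }
          }
        }
      }
    }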
2024-12-06T10:17:42,071 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/f8ef894924c14f5bbc993c44512aa2bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=91.9 K 2024-12-06T10:17:42,071 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:42,071 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/f8ef894924c14f5bbc993c44512aa2bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c] 2024-12-06T10:17:42,072 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:42,072 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:42,072 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:42,072 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/a631ae2750414318af30c4bee5c4cb4f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/5446ed762a054e5ea7c2051ba8781db9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6ed9a1d06f1144b9b703079c14e8971b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.4 K 2024-12-06T10:17:42,072 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8ef894924c14f5bbc993c44512aa2bb, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733480258498 2024-12-06T10:17:42,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:42,073 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a631ae2750414318af30c4bee5c4cb4f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733480258498 2024-12-06T10:17:42,073 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 143a0bccaa4e4452b2b8336368a90149, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733480258528 2024-12-06T10:17:42,073 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5446ed762a054e5ea7c2051ba8781db9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733480258528 2024-12-06T10:17:42,073 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting d339be6ae5da4a43811ec2af2806d36c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733480259664 2024-12-06T10:17:42,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:42,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:42,074 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed9a1d06f1144b9b703079c14e8971b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733480259664 2024-12-06T10:17:42,081 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:42,088 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cba9fa4bb7ad0155608756e918c3bf01#B#compaction#204 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:42,088 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/558cc231625943629b43e6bddf10ca39 is 50, key is test_row_0/B:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:42,093 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206f51a653ca642463fbfcd876f06ea9f46_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:42,095 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206f51a653ca642463fbfcd876f06ea9f46_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:42,095 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f51a653ca642463fbfcd876f06ea9f46_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:42,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742066_1242 (size=4469) 2024-12-06T10:17:42,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742065_1241 (size=12983) 2024-12-06T10:17:42,218 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:42,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T10:17:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
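The DefaultMobStoreCompactor and MOB writer messages above appear because column family A of this test table is MOB-enabled, so its compactions go through the MOB code path (which here aborts the MOB writer since no cells cross the MOB threshold). Only as an illustration of how such a family is declared, not the test's own table-creation code, and with an arbitrary 100-byte threshold:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Family A keeps large values in MOB files, so DefaultMobStoreCompactor handles its compactions.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100)          // bytes; illustrative threshold, not the test's value
              .build());
          // Families B and C are ordinary families compacted by the default compactor.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }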
2024-12-06T10:17:42,219 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:17:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069efecd0530234cf497666cd6da7256fd_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480260802/Put/seqid=0 2024-12-06T10:17:42,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742067_1243 (size=12454) 2024-12-06T10:17:42,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,241 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069efecd0530234cf497666cd6da7256fd_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069efecd0530234cf497666cd6da7256fd_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:42,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/67414dbe47e8495f9265a61612cb1fbf, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:42,242 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/67414dbe47e8495f9265a61612cb1fbf is 175, key is test_row_0/A:col10/1733480260802/Put/seqid=0 2024-12-06T10:17:42,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742068_1244 (size=31255) 2024-12-06T10:17:42,247 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/67414dbe47e8495f9265a61612cb1fbf 2024-12-06T10:17:42,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/60b86089fe1f455c95b26aab7a65b418 is 50, key is test_row_0/B:col10/1733480260802/Put/seqid=0 2024-12-06T10:17:42,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742069_1245 (size=12301) 2024-12-06T10:17:42,507 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#203 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:42,507 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/0312b054b33a4e95b9b38e32ac15a467 is 175, key is test_row_0/A:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:42,513 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/558cc231625943629b43e6bddf10ca39 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/558cc231625943629b43e6bddf10ca39 2024-12-06T10:17:42,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742070_1246 (size=31937) 2024-12-06T10:17:42,520 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/0312b054b33a4e95b9b38e32ac15a467 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0312b054b33a4e95b9b38e32ac15a467 2024-12-06T10:17:42,520 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into 558cc231625943629b43e6bddf10ca39(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:42,521 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:42,521 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480262070; duration=0sec 2024-12-06T10:17:42,521 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:42,521 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:42,521 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:42,522 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:42,522 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:42,522 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:42,523 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/5c37337a8c944e2393acb65e187c697f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/75fc84dcd0f14ac89a26b1757d99d447, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b90b1ea1929546c5b7c82b7dc942fe2e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.4 K 2024-12-06T10:17:42,523 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c37337a8c944e2393acb65e187c697f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733480258498 2024-12-06T10:17:42,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 75fc84dcd0f14ac89a26b1757d99d447, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733480258528 2024-12-06T10:17:42,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b90b1ea1929546c5b7c82b7dc942fe2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733480259664 2024-12-06T10:17:42,528 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into 0312b054b33a4e95b9b38e32ac15a467(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:42,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:42,528 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480262069; duration=0sec 2024-12-06T10:17:42,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:42,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:42,534 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#C#compaction#207 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:42,534 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/0a940abecb6244f1bec238f03001a345 is 50, key is test_row_0/C:col10/1733480259664/Put/seqid=0 2024-12-06T10:17:42,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742071_1247 (size=12983) 2024-12-06T10:17:42,566 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/0a940abecb6244f1bec238f03001a345 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/0a940abecb6244f1bec238f03001a345 2024-12-06T10:17:42,571 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into 0a940abecb6244f1bec238f03001a345(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
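The minor compactions completed above were selected automatically after the flush (ExploringCompactionPolicy picked all three flushed files per store). For comparison only, and not something the test itself does at this point, an equivalent compaction of the same table could be requested through the Admin API; the polling interval below is an arbitrary assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                                    // queue a minor compaction for each region
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);                                     // poll until the queues drain (assumption)
          }
        }
      }
    }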
2024-12-06T10:17:42,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:42,571 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480262072; duration=0sec 2024-12-06T10:17:42,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:42,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:42,662 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/60b86089fe1f455c95b26aab7a65b418 2024-12-06T10:17:42,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/ee4db79eb349470e8ab624beaeefc158 is 50, key is test_row_0/C:col10/1733480260802/Put/seqid=0 2024-12-06T10:17:42,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742072_1248 (size=12301) 2024-12-06T10:17:42,680 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/ee4db79eb349470e8ab624beaeefc158 2024-12-06T10:17:42,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/67414dbe47e8495f9265a61612cb1fbf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf 2024-12-06T10:17:42,694 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf, entries=150, sequenceid=303, filesize=30.5 K 2024-12-06T10:17:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,695 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/60b86089fe1f455c95b26aab7a65b418 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/60b86089fe1f455c95b26aab7a65b418 2024-12-06T10:17:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T10:17:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,703 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/60b86089fe1f455c95b26aab7a65b418, entries=150, sequenceid=303, filesize=12.0 K 2024-12-06T10:17:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/ee4db79eb349470e8ab624beaeefc158 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/ee4db79eb349470e8ab624beaeefc158 2024-12-06T10:17:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,705 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,708 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,712 DEBUG 
2024-12-06T10:17:42,714 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/ee4db79eb349470e8ab624beaeefc158, entries=150, sequenceid=303, filesize=12.0 K
2024-12-06T10:17:42,716 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for cba9fa4bb7ad0155608756e918c3bf01 in 496ms, sequenceid=303, compaction requested=false
2024-12-06T10:17:42,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01:
2024-12-06T10:17:42,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:42,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60
2024-12-06T10:17:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=60
2024-12-06T10:17:42,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-12-06T10:17:42,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0450 sec
2024-12-06T10:17:42,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.0490 sec
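The DEBUG record that dominates this part of the log is emitted each time an RPC handler resolves a StoreFileTracker for a store; the factory appears to pick the configured implementation and fall back to DefaultStoreFileTracker when nothing else is set. The Java sketch below only illustrates that configuration-driven lookup. The interface, config key, class names and method signatures are assumptions made for the sketch; they are not the actual org.apache.hadoop.hbase.regionserver.storefiletracker API.

// Illustrative sketch only: a configuration-driven factory of the kind the
// "instantiating StoreFileTracker impl ..." record reflects.
import java.util.Properties;

interface StoreFileTrackerSketch {
  void load();
}

class DefaultStoreFileTrackerSketch implements StoreFileTrackerSketch {
  @Override
  public void load() {
    // A default tracker would list store files straight from the region directory.
    System.out.println("loading store files from the region directory");
  }
}

final class StoreFileTrackerFactorySketch {
  // Hypothetical key; the real configuration property in HBase differs.
  static final String IMPL_KEY = "sketch.storefiletracker.impl";

  static StoreFileTrackerSketch create(Properties conf) throws Exception {
    String impl = conf.getProperty(IMPL_KEY, DefaultStoreFileTrackerSketch.class.getName());
    // Mirrors the repeated DEBUG record: log the chosen implementation, then
    // instantiate it reflectively for the store being opened.
    System.out.println("instantiating StoreFileTracker impl " + impl);
    return (StoreFileTrackerSketch) Class.forName(impl).getDeclaredConstructor().newInstance();
  }

  public static void main(String[] args) throws Exception {
    StoreFileTrackerSketch tracker = create(new Properties());
    tracker.load();
  }
}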
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,726 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,730 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,733 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,738 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,743 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,746 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,749 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,752 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,756 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,759 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,761 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG line from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", recurs roughly 60 times between 2024-12-06T10:17:42,761 and 2024-12-06T10:17:42,776, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 33397 ...]
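The repeated DEBUG line above is StoreFileTrackerFactory resolving, for each store it touches, which tracker implementation to use; in this run it is always the built-in DefaultStoreFileTracker. As a hedged illustration only, and not something taken from this test's setup, recent HBase releases select the tracker through a store file tracking property that can be pinned per table at creation time. The sketch below assumes the property key hbase.store.file-tracker.impl and uses a purely hypothetical table name ExampleTable with family cf; treat both as placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) throws Exception {
    // Minimal sketch: cluster settings are expected to come from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("ExampleTable"))      // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // hypothetical family
          // Assumed property key for store file tracking; "DEFAULT" corresponds to the
          // DefaultStoreFileTracker instances seen in this log.
          .setValue("hbase.store.file-tracker.impl", "DEFAULT")
          .build();
      admin.createTable(desc);
    }
  }
}

Leaving the property unset should fall back to the default tracker, which matches the DefaultStoreFileTracker instantiations recorded here.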
2024-12-06T10:17:42,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-12-06T10:17:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,777 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed
2024-12-06T10:17:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-06T10:17:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees
2024-12-06T10:17:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-12-06T10:17:42,780 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-06T10:17:42,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,781 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-06T10:17:42,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-06T10:17:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
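The sequence above is the master-side view of a client-requested flush: MasterRpcServices receives the request, HMaster stores a FlushTableProcedure (pid=61), which prepares and then fans out a FlushRegionProcedure child (pid=62), while the client polls until the procedure is reported done. As a minimal sketch of how such a flush is typically issued from the client side with the standard HBase 2.x Admin API (illustrative code, not taken from TestAcidGuarantees), the call blocks until the master reports the flush complete, which is what the "Operation: FLUSH ... procId: 59 completed" line reflects:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Minimal sketch: connection settings come from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous flush of the table seen in the log; the call returns once the
      // master-side flush procedure has completed.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}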
[... the DEBUG line from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", recurs well over 200 more times between 2024-12-06T10:17:42,783 and 2024-12-06T10:17:42,846, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 33397 ...]
2024-12-06T10:17:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T10:17:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[2024-12-06T10:17:42,900 - 10:17:42,939: DEBUG storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker -- this entry is logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0-2, port=33397; the distinct records from this interval follow]
2024-12-06T10:17:42,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:17:42,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62
2024-12-06T10:17:42,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:42,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01:
2024-12-06T10:17:42,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:42,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62
2024-12-06T10:17:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=62
2024-12-06T10:17:42,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61
2024-12-06T10:17:42,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-12-06T10:17:42,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 159 msec
[2024-12-06T10:17:42,940 - 10:17:42,983: DEBUG storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker -- this entry is logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0-2, port=33397, with no other records in this interval]
2024-12-06T10:17:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:42,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:43,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:43,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206442b6c7eeaf24942bfc5cb5510f6afac_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480263004/Put/seqid=0 2024-12-06T10:17:43,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,033 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742073_1249 (size=25158) 2024-12-06T10:17:43,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,043 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,052 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206442b6c7eeaf24942bfc5cb5510f6afac_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206442b6c7eeaf24942bfc5cb5510f6afac_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:43,054 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7703aaa26c9740eea92666e564fa1f48, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:43,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7703aaa26c9740eea92666e564fa1f48 is 175, key is test_row_0/A:col10/1733480263004/Put/seqid=0 2024-12-06T10:17:43,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480323046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,055 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:17:43,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480323047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480323048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480323050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480323055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742074_1250 (size=74795) 2024-12-06T10:17:43,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T10:17:43,082 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-06T10:17:43,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:43,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-06T10:17:43,085 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:43,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T10:17:43,086 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:43,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:43,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480323156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480323157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480323159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480323160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480323160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T10:17:43,238 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-06T10:17:43,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:43,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:43,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
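A note on the entries above, with a small illustrative sketch. The repeated RegionTooBusyException warnings come from HRegion.checkResources(), which rejects writes once a region's memstore exceeds its blocking limit (roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the "Over memstore limit=512.0 K" figure indicates this test lowers those settings well below their defaults). The FlushTableProcedure/FlushRegionProcedure entries (pid=63/64) correspond to a client-requested flush of TestAcidGuarantees. The Java sketch below is not part of the test code that produced this log; it only illustrates, under the assumption of a reachable cluster configured via hbase-site.xml, how a writer might back off on the busy region and how an admin flush like the one logged here is issued. Table, family, and row names (TestAcidGuarantees, family A, test_row_0) are taken from the log; everything else is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        // The blocking limit behind "Over memstore limit=..." is approximately
        // flush size * block multiplier (defaults: 128 MB * 4).
        long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");

        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {

          // Writer side: back off and retry while the region is over its memstore limit.
          // Depending on client retry settings, the busy condition may surface directly as
          // RegionTooBusyException or only after the client's own retries are exhausted.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                       // give up after a few attempts
              }
              Thread.sleep(200L * attempt);    // simple linear backoff
            }
          }

          // Admin side: request a table flush; in this log such a request shows up on the
          // master as a FlushTableProcedure (pid=63) with a FlushRegionProcedure subtask (pid=64).
          admin.flush(tn);
        }
      }
    }

If the target region is already flushing, the FlushRegionCallable logs "NOT flushing ... as already flushing" and the subprocedure fails with "Unable to complete flush", exactly as the ERROR entry that follows shows.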
2024-12-06T10:17:43,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:43,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:43,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:43,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480323358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480323358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480323364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480323364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480323364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T10:17:43,391 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-06T10:17:43,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:43,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:43,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:43,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:43,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:43,472 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7703aaa26c9740eea92666e564fa1f48 2024-12-06T10:17:43,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/3d3f7aa0a66443e2afe702d99aacb102 is 50, key is test_row_0/B:col10/1733480263004/Put/seqid=0 2024-12-06T10:17:43,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742075_1251 (size=12301) 2024-12-06T10:17:43,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/3d3f7aa0a66443e2afe702d99aacb102 2024-12-06T10:17:43,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/279de041765b472281ca315e158cf918 is 50, key is test_row_0/C:col10/1733480263004/Put/seqid=0 2024-12-06T10:17:43,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742076_1252 (size=12301) 2024-12-06T10:17:43,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=318 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/279de041765b472281ca315e158cf918 2024-12-06T10:17:43,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/7703aaa26c9740eea92666e564fa1f48 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48 2024-12-06T10:17:43,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48, entries=400, sequenceid=318, filesize=73.0 K 2024-12-06T10:17:43,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/3d3f7aa0a66443e2afe702d99aacb102 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/3d3f7aa0a66443e2afe702d99aacb102 2024-12-06T10:17:43,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/3d3f7aa0a66443e2afe702d99aacb102, entries=150, sequenceid=318, filesize=12.0 K 2024-12-06T10:17:43,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/279de041765b472281ca315e158cf918 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/279de041765b472281ca315e158cf918 2024-12-06T10:17:43,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/279de041765b472281ca315e158cf918, entries=150, sequenceid=318, filesize=12.0 K 2024-12-06T10:17:43,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cba9fa4bb7ad0155608756e918c3bf01 in 528ms, sequenceid=318, compaction requested=true 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:43,533 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:43,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:43,533 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:43,535 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:43,535 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:43,535 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
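The compaction-selection entries here and just below show SortedCompactionPolicy/ExploringCompactionPolicy picking three eligible store files per family (A, B, C) for a minor compaction, with sixteen files as the blocking threshold. Those two numbers correspond to standard store-level settings; a minimal, hedged configuration sketch follows (the class name is invented and the values shown are the usual defaults, not values read back from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected
        // (the "3 eligible" in the log matches this default of 3).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files folded into a single compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which writes are held back until compaction catches up
        // (the "16 blocking" in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }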
2024-12-06T10:17:43,535 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/558cc231625943629b43e6bddf10ca39, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/60b86089fe1f455c95b26aab7a65b418, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/3d3f7aa0a66443e2afe702d99aacb102] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.7 K 2024-12-06T10:17:43,535 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137987 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:43,535 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 558cc231625943629b43e6bddf10ca39, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733480259664 2024-12-06T10:17:43,535 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:43,535 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:43,536 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0312b054b33a4e95b9b38e32ac15a467, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=134.8 K 2024-12-06T10:17:43,536 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:43,536 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0312b054b33a4e95b9b38e32ac15a467, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48] 2024-12-06T10:17:43,536 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 60b86089fe1f455c95b26aab7a65b418, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733480260793 2024-12-06T10:17:43,537 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d3f7aa0a66443e2afe702d99aacb102, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480263002 2024-12-06T10:17:43,537 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0312b054b33a4e95b9b38e32ac15a467, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733480259664 2024-12-06T10:17:43,537 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67414dbe47e8495f9265a61612cb1fbf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733480260793 2024-12-06T10:17:43,538 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7703aaa26c9740eea92666e564fa1f48, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480262996 2024-12-06T10:17:43,544 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-06T10:17:43,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
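pid=64 is a master-dispatched flush procedure whose region-server callable keeps failing with "Unable to complete flush ... as already flushing", so the master re-queues it until the in-flight flush completes, while the repeated "Checking to see if procedure is done pid=63" lines are the caller polling its earlier request. At the client level a request like this most plausibly originates from an Admin flush call; a hedged sketch of that call is below (the class name is invented, the table name is taken from the log, and the assumption that this particular test drives its flushes this way is exactly that, an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush of all regions of the table; with procedure-based flushes the
          // server side of such a request shows up as master-driven FlushRegionCallable
          // executions like the pid=64 entries in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }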
2024-12-06T10:17:43,544 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:17:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:43,546 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:43,546 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#212 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:43,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/f1f4ede83afe4410ab17ed54b99bde65 is 50, key is test_row_0/B:col10/1733480263004/Put/seqid=0 2024-12-06T10:17:43,548 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412064a0e3b48921149388def9f22fdc53706_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:43,550 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412064a0e3b48921149388def9f22fdc53706_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:43,550 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412064a0e3b48921149388def9f22fdc53706_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:43,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120640a8491bfc2f46a68383014ca8304ae7_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480263054/Put/seqid=0 2024-12-06T10:17:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742077_1253 (size=13085) 2024-12-06T10:17:43,578 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/f1f4ede83afe4410ab17ed54b99bde65 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f1f4ede83afe4410ab17ed54b99bde65 2024-12-06T10:17:43,584 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into f1f4ede83afe4410ab17ed54b99bde65(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
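At this point the B-family compaction has rewritten its three input files into a single ~12.8 K file and released the under-compaction mark, and the selector moves on to family C. For completeness, a hedged sketch of how a test or operator could watch this from the client side with standard Admin calls (the helper class and the polling interval are invented; CompactionState.NONE simply means no compaction is currently reported for the table):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;

    public class CompactionStateSketch {
      // Poll until the table no longer reports an active compaction (illustrative only).
      public static void waitForCompactions(Admin admin, TableName table) throws Exception {
        while (admin.getCompactionState(table) != CompactionState.NONE) {
          Thread.sleep(100L);
        }
      }
    }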
2024-12-06T10:17:43,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:43,584 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480263533; duration=0sec 2024-12-06T10:17:43,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:43,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:43,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:43,586 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:43,586 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:43,586 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:43,586 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/0a940abecb6244f1bec238f03001a345, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/ee4db79eb349470e8ab624beaeefc158, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/279de041765b472281ca315e158cf918] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.7 K 2024-12-06T10:17:43,586 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a940abecb6244f1bec238f03001a345, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733480259664 2024-12-06T10:17:43,587 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ee4db79eb349470e8ab624beaeefc158, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733480260793 2024-12-06T10:17:43,587 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 279de041765b472281ca315e158cf918, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480263002 2024-12-06T10:17:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 
is added to blk_1073742078_1254 (size=4469) 2024-12-06T10:17:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742079_1255 (size=12454) 2024-12-06T10:17:43,598 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#C#compaction#215 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:43,599 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/8483621323574f5791e8ec9e9f578611 is 50, key is test_row_0/C:col10/1733480263004/Put/seqid=0 2024-12-06T10:17:43,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:43,609 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120640a8491bfc2f46a68383014ca8304ae7_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120640a8491bfc2f46a68383014ca8304ae7_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:43,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c30b0dae05bc43dfb38abc21218cad7b, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:43,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c30b0dae05bc43dfb38abc21218cad7b is 175, key is test_row_0/A:col10/1733480263054/Put/seqid=0 2024-12-06T10:17:43,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742081_1257 (size=31255) 2024-12-06T10:17:43,618 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=343, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c30b0dae05bc43dfb38abc21218cad7b 2024-12-06T10:17:43,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742080_1256 (size=13085) 2024-12-06T10:17:43,633 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/82ea790d3a56430c9afefe432a9d08f2 is 50, key is test_row_0/B:col10/1733480263054/Put/seqid=0 2024-12-06T10:17:43,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742082_1258 (size=12301) 2024-12-06T10:17:43,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:43,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:43,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480323673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480323673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480323674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480323674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480323675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T10:17:43,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480323778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480323778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480323778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480323779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480323779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480323980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480323981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480323982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480323982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:43,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:17:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480323983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:17:43,997 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#213 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:17:43,998 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/a02c466717394270807d8f402d1ba2a3 is 175, key is test_row_0/A:col10/1733480263004/Put/seqid=0
2024-12-06T10:17:44,028 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/8483621323574f5791e8ec9e9f578611 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/8483621323574f5791e8ec9e9f578611
2024-12-06T10:17:44,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742083_1259 (size=32039)
2024-12-06T10:17:44,040 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into 8483621323574f5791e8ec9e9f578611(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-06T10:17:44,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01:
2024-12-06T10:17:44,040 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480263533; duration=0sec
2024-12-06T10:17:44,041 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:17:44,041 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C
2024-12-06T10:17:44,049 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/82ea790d3a56430c9afefe432a9d08f2
2024-12-06T10:17:44,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/f942246d18f1462a9530ef9aca33f078 is 50, key is test_row_0/C:col10/1733480263054/Put/seqid=0
2024-12-06T10:17:44,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742084_1260 (size=12301)
2024-12-06T10:17:44,078 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/f942246d18f1462a9530ef9aca33f078
2024-12-06T10:17:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c30b0dae05bc43dfb38abc21218cad7b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b
2024-12-06T10:17:44,091 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b, entries=150, sequenceid=343, filesize=30.5 K
2024-12-06T10:17:44,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/82ea790d3a56430c9afefe432a9d08f2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/82ea790d3a56430c9afefe432a9d08f2 2024-12-06T10:17:44,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,100 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/82ea790d3a56430c9afefe432a9d08f2, entries=150, sequenceid=343, filesize=12.0 K 2024-12-06T10:17:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/f942246d18f1462a9530ef9aca33f078 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f942246d18f1462a9530ef9aca33f078 2024-12-06T10:17:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,108 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f942246d18f1462a9530ef9aca33f078, entries=150, sequenceid=343, filesize=12.0 K
2024-12-06T10:17:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,109 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cba9fa4bb7ad0155608756e918c3bf01 in 565ms, sequenceid=343, compaction requested=false
2024-12-06T10:17:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01:
2024-12-06T10:17:44,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.
2024-12-06T10:17:44,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64
2024-12-06T10:17:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=64
2024-12-06T10:17:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63
2024-12-06T10:17:44,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0250 sec
2024-12-06T10:17:44,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.0300 sec
2024-12-06T10:17:44,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:17:44,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T10:17:44,189 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-06T10:17:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:44,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-06T10:17:44,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,201 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:44,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,201 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:44,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,202 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:44,202 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=33397) between 2024-12-06T10:17:44,205 and 2024-12-06T10:17:44,291; duplicates collapsed]
2024-12-06T10:17:44,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:44,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:17:44,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:44,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:44,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:44,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:44,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:44,305 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:44,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:44,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206127708b0941e4f8cb35a60ab40a08648_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to 
blk_1073742085_1261 (size=14994) 2024-12-06T10:17:44,329 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,333 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206127708b0941e4f8cb35a60ab40a08648_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206127708b0941e4f8cb35a60ab40a08648_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:44,334 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:44,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0 is 175, key is test_row_0/A:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742086_1262 (size=39949) 2024-12-06T10:17:44,345 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0 2024-12-06T10:17:44,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/b40c78e9ebac40e0ab4e47112d9e5e91 is 50, key is test_row_0/B:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:44,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:44,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480324380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742087_1263 (size=12301) 2024-12-06T10:17:44,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480324382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/b40c78e9ebac40e0ab4e47112d9e5e91 2024-12-06T10:17:44,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480324385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480324386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480324384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/994c2f14d02f42bfa81e25d6d3672ff5 is 50, key is test_row_0/C:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742088_1264 (size=12301) 2024-12-06T10:17:44,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/994c2f14d02f42bfa81e25d6d3672ff5 2024-12-06T10:17:44,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0 2024-12-06T10:17:44,469 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/a02c466717394270807d8f402d1ba2a3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a02c466717394270807d8f402d1ba2a3 2024-12-06T10:17:44,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0, entries=200, sequenceid=358, 
filesize=39.0 K 2024-12-06T10:17:44,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/b40c78e9ebac40e0ab4e47112d9e5e91 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b40c78e9ebac40e0ab4e47112d9e5e91 2024-12-06T10:17:44,477 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into a02c466717394270807d8f402d1ba2a3(size=31.3 K), total size for store is 100.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:44,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:44,477 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480263533; duration=0sec 2024-12-06T10:17:44,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:44,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:44,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b40c78e9ebac40e0ab4e47112d9e5e91, entries=150, sequenceid=358, filesize=12.0 K 2024-12-06T10:17:44,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/994c2f14d02f42bfa81e25d6d3672ff5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/994c2f14d02f42bfa81e25d6d3672ff5 2024-12-06T10:17:44,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/994c2f14d02f42bfa81e25d6d3672ff5, entries=150, sequenceid=358, filesize=12.0 K 2024-12-06T10:17:44,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cba9fa4bb7ad0155608756e918c3bf01 in 183ms, sequenceid=358, compaction requested=true 2024-12-06T10:17:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:44,488 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:44,488 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:17:44,488 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:44,490 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:44,490 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:44,490 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,490 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a02c466717394270807d8f402d1ba2a3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=100.8 K 2024-12-06T10:17:44,490 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:44,490 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a02c466717394270807d8f402d1ba2a3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0] 2024-12-06T10:17:44,491 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a02c466717394270807d8f402d1ba2a3, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480263002 2024-12-06T10:17:44,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:17:44,491 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c30b0dae05bc43dfb38abc21218cad7b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733480263044 2024-12-06T10:17:44,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:44,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:44,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:44,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:44,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:44,491 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b9614d9f3b0e47fc8ac1ef2d49fd32d0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733480263673 2024-12-06T10:17:44,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:44,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:44,495 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:44,495 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:44,495 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:44,495 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f1f4ede83afe4410ab17ed54b99bde65, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/82ea790d3a56430c9afefe432a9d08f2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b40c78e9ebac40e0ab4e47112d9e5e91] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.8 K 2024-12-06T10:17:44,497 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1f4ede83afe4410ab17ed54b99bde65, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480263002 2024-12-06T10:17:44,499 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82ea790d3a56430c9afefe432a9d08f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733480263044 2024-12-06T10:17:44,500 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b40c78e9ebac40e0ab4e47112d9e5e91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733480263673 2024-12-06T10:17:44,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:44,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,507 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:44,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:44,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:44,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:44,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,511 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#222 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:44,512 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/902f66c01a8842629f3bd2c06c1f19a6 is 50, key is test_row_0/B:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480324507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480324509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480324509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480324512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480324512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,522 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412060d0248ae206043c7b37496c1d123cf38_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:44,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412060d0248ae206043c7b37496c1d123cf38_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:44,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060d0248ae206043c7b37496c1d123cf38_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:44,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206409da238d8b04f488aee351cb5aa6add_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480264385/Put/seqid=0 2024-12-06T10:17:44,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742089_1265 (size=13187) 2024-12-06T10:17:44,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742090_1266 
(size=4469) 2024-12-06T10:17:44,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742091_1267 (size=14994) 2024-12-06T10:17:44,570 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:44,574 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206409da238d8b04f488aee351cb5aa6add_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206409da238d8b04f488aee351cb5aa6add_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:44,575 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c5590d8e154245279e74d0f0afe2d0c2, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:44,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c5590d8e154245279e74d0f0afe2d0c2 is 175, key is test_row_0/A:col10/1733480264385/Put/seqid=0 2024-12-06T10:17:44,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742092_1268 (size=39949) 2024-12-06T10:17:44,585 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c5590d8e154245279e74d0f0afe2d0c2 2024-12-06T10:17:44,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/28d3b86d569647448ffe3ce308a44d58 is 50, key is test_row_0/B:col10/1733480264385/Put/seqid=0 2024-12-06T10:17:44,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742093_1269 (size=12301) 2024-12-06T10:17:44,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480324617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480324617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480324617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480324618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480324618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:44,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:44,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:44,814 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:44,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:44,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480324822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480324823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480324823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480324824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480324824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,962 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/902f66c01a8842629f3bd2c06c1f19a6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/902f66c01a8842629f3bd2c06c1f19a6 2024-12-06T10:17:44,967 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into 902f66c01a8842629f3bd2c06c1f19a6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:44,967 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:44,967 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480264488; duration=0sec 2024-12-06T10:17:44,967 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:44,967 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:44,967 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:44,969 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:44,969 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#A#compaction#221 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:44,969 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:44,969 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:44,969 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/8483621323574f5791e8ec9e9f578611, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f942246d18f1462a9530ef9aca33f078, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/994c2f14d02f42bfa81e25d6d3672ff5] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.8 K 2024-12-06T10:17:44,969 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/1fbe33e68d2f48858d1b80ac3e9cb16e is 175, key is test_row_0/A:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,970 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8483621323574f5791e8ec9e9f578611, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480263002 2024-12-06T10:17:44,970 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f942246d18f1462a9530ef9aca33f078, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733480263044 2024-12-06T10:17:44,971 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 994c2f14d02f42bfa81e25d6d3672ff5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733480263673 2024-12-06T10:17:44,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742094_1270 (size=32141) 2024-12-06T10:17:44,979 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/1fbe33e68d2f48858d1b80ac3e9cb16e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/1fbe33e68d2f48858d1b80ac3e9cb16e 2024-12-06T10:17:44,980 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:44,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:44,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:44,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:44,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:44,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,981 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#C#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:44,982 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/eed35fecf9414d81a832b19ecf1acd2f is 50, key is test_row_0/C:col10/1733480264303/Put/seqid=0 2024-12-06T10:17:44,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:44,986 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into 1fbe33e68d2f48858d1b80ac3e9cb16e(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:44,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:44,986 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480264488; duration=0sec 2024-12-06T10:17:44,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:44,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:44,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742095_1271 (size=13187) 2024-12-06T10:17:45,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/28d3b86d569647448ffe3ce308a44d58 2024-12-06T10:17:45,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/f4791db864cc4a799a3d332c3e55af10 is 50, key is test_row_0/C:col10/1733480264385/Put/seqid=0 2024-12-06T10:17:45,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742096_1272 (size=12301) 2024-12-06T10:17:45,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/f4791db864cc4a799a3d332c3e55af10 2024-12-06T10:17:45,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/c5590d8e154245279e74d0f0afe2d0c2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2 2024-12-06T10:17:45,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2, entries=200, sequenceid=383, filesize=39.0 K 2024-12-06T10:17:45,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/28d3b86d569647448ffe3ce308a44d58 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/28d3b86d569647448ffe3ce308a44d58 2024-12-06T10:17:45,033 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/28d3b86d569647448ffe3ce308a44d58, entries=150, sequenceid=383, filesize=12.0 K 2024-12-06T10:17:45,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/f4791db864cc4a799a3d332c3e55af10 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f4791db864cc4a799a3d332c3e55af10 2024-12-06T10:17:45,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f4791db864cc4a799a3d332c3e55af10, entries=150, sequenceid=383, filesize=12.0 K 2024-12-06T10:17:45,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for cba9fa4bb7ad0155608756e918c3bf01 in 547ms, sequenceid=383, compaction requested=false 2024-12-06T10:17:45,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:45,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T10:17:45,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:45,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:45,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:45,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:45,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:45,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:45,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:45,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:45,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:45,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068818f01578c44474b25db886c4dcbebe_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:45,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742097_1273 (size=14994) 2024-12-06T10:17:45,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480325174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480325174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480325174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480325175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480325175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480325278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480325278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480325278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480325278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480325279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:45,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:45,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:45,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:45,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:45,396 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/eed35fecf9414d81a832b19ecf1acd2f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/eed35fecf9414d81a832b19ecf1acd2f 2024-12-06T10:17:45,401 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into eed35fecf9414d81a832b19ecf1acd2f(size=12.9 K), total size for store is 24.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:45,401 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:45,401 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480264488; duration=0sec 2024-12-06T10:17:45,401 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:45,401 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:45,439 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:45,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:45,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:45,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480325484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480325484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480325484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480325484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480325484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,542 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:45,552 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068818f01578c44474b25db886c4dcbebe_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068818f01578c44474b25db886c4dcbebe_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:45,554 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/481ec626f7ef4ea99031d4ef80f51b4e, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:45,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/481ec626f7ef4ea99031d4ef80f51b4e is 175, key is test_row_0/A:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:45,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742098_1274 (size=39949) 2024-12-06T10:17:45,565 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=399, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/481ec626f7ef4ea99031d4ef80f51b4e 2024-12-06T10:17:45,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6c1e24b6e5de448b9a6b3106e72ecfea is 50, key is test_row_0/B:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:45,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742099_1275 
(size=12301) 2024-12-06T10:17:45,591 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:45,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:45,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:45,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,745 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:45,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
as already flushing 2024-12-06T10:17:45,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43864 deadline: 1733480325788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43930 deadline: 1733480325789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1733480325789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43906 deadline: 1733480325790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43884 deadline: 1733480325791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,899 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:45,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:45,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:45,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:45,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:45,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6c1e24b6e5de448b9a6b3106e72ecfea 2024-12-06T10:17:45,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/824f687360794fcd9bcb79b8588ef24b is 50, key is test_row_0/C:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:45,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742100_1276 (size=12301) 2024-12-06T10:17:45,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/824f687360794fcd9bcb79b8588ef24b 2024-12-06T10:17:45,998 DEBUG [Thread-777 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:61610 2024-12-06T10:17:45,999 DEBUG [Thread-777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,001 DEBUG [Thread-775 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00cb464a to 127.0.0.1:61610 2024-12-06T10:17:46,001 DEBUG [Thread-775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,002 DEBUG [Thread-781 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:61610 2024-12-06T10:17:46,002 DEBUG [Thread-781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-06T10:17:46,002 DEBUG [Thread-779 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:61610 2024-12-06T10:17:46,002 DEBUG [Thread-779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/481ec626f7ef4ea99031d4ef80f51b4e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e 2024-12-06T10:17:46,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e, entries=200, sequenceid=399, filesize=39.0 K 2024-12-06T10:17:46,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/6c1e24b6e5de448b9a6b3106e72ecfea as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6c1e24b6e5de448b9a6b3106e72ecfea 2024-12-06T10:17:46,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6c1e24b6e5de448b9a6b3106e72ecfea, entries=150, sequenceid=399, filesize=12.0 K 2024-12-06T10:17:46,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/824f687360794fcd9bcb79b8588ef24b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/824f687360794fcd9bcb79b8588ef24b 2024-12-06T10:17:46,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/824f687360794fcd9bcb79b8588ef24b, entries=150, sequenceid=399, filesize=12.0 K 2024-12-06T10:17:46,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for cba9fa4bb7ad0155608756e918c3bf01 in 889ms, sequenceid=399, compaction requested=true 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:46,017 DEBUG 
[RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cba9fa4bb7ad0155608756e918c3bf01:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:46,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:46,017 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:46,018 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112039 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:46,018 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/A is initiating minor compaction (all files) 2024-12-06T10:17:46,018 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/A in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:46,018 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:46,018 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/B is initiating minor compaction (all files) 2024-12-06T10:17:46,018 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/1fbe33e68d2f48858d1b80ac3e9cb16e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=109.4 K 2024-12-06T10:17:46,018 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/B in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:46,018 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:46,018 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/902f66c01a8842629f3bd2c06c1f19a6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/28d3b86d569647448ffe3ce308a44d58, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6c1e24b6e5de448b9a6b3106e72ecfea] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.9 K 2024-12-06T10:17:46,018 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/1fbe33e68d2f48858d1b80ac3e9cb16e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e] 2024-12-06T10:17:46,019 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 902f66c01a8842629f3bd2c06c1f19a6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733480263673 2024-12-06T10:17:46,019 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fbe33e68d2f48858d1b80ac3e9cb16e, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733480263673 2024-12-06T10:17:46,019 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5590d8e154245279e74d0f0afe2d0c2, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733480264373 2024-12-06T10:17:46,019 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 28d3b86d569647448ffe3ce308a44d58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733480264373 2024-12-06T10:17:46,019 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 481ec626f7ef4ea99031d4ef80f51b4e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733480264499 2024-12-06T10:17:46,019 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c1e24b6e5de448b9a6b3106e72ecfea, keycount=150, bloomtype=ROW, size=12.0 
K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733480264499 2024-12-06T10:17:46,028 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cba9fa4bb7ad0155608756e918c3bf01#B#compaction#230 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:46,029 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/9e7577adbe564bf7acbe92a2200c48f4 is 50, key is test_row_0/B:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:46,032 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:46,038 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412060f6d65e09568470d875833d883a39989_cba9fa4bb7ad0155608756e918c3bf01 store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:46,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742101_1277 (size=13289) 2024-12-06T10:17:46,054 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:46,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-06T10:17:46,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:46,055 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:17:46,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:46,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:46,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:46,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:46,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:46,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:46,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060006e8ac5913438eae6b6ff775b4ca1b_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_0/A:col10/1733480265174/Put/seqid=0 2024-12-06T10:17:46,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412060f6d65e09568470d875833d883a39989_cba9fa4bb7ad0155608756e918c3bf01, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:46,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060f6d65e09568470d875833d883a39989_cba9fa4bb7ad0155608756e918c3bf01 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:46,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742102_1278 (size=12454) 2024-12-06T10:17:46,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742103_1279 (size=4469) 2024-12-06T10:17:46,093 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cba9fa4bb7ad0155608756e918c3bf01#A#compaction#231 average throughput is 0.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:46,094 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/ed7f6648f9eb461f8ee49c8647936eae is 175, key is test_row_0/A:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:46,095 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060006e8ac5913438eae6b6ff775b4ca1b_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060006e8ac5913438eae6b6ff775b4ca1b_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:46,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/8c09a2471fcd43dcb99730b60f38e55b, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:46,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/8c09a2471fcd43dcb99730b60f38e55b is 175, key is test_row_0/A:col10/1733480265174/Put/seqid=0 2024-12-06T10:17:46,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742104_1280 (size=32243) 2024-12-06T10:17:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742105_1281 (size=31255) 2024-12-06T10:17:46,112 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=423, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/8c09a2471fcd43dcb99730b60f38e55b 2024-12-06T10:17:46,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/87e373b242394b52a678243644391fcc is 50, key is test_row_0/B:col10/1733480265174/Put/seqid=0 2024-12-06T10:17:46,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742106_1282 (size=12301) 2024-12-06T10:17:46,293 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:46,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. as already flushing 2024-12-06T10:17:46,293 DEBUG [Thread-764 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46c2c778 to 127.0.0.1:61610 2024-12-06T10:17:46,293 DEBUG [Thread-764 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,296 DEBUG [Thread-770 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24f64590 to 127.0.0.1:61610 2024-12-06T10:17:46,296 DEBUG [Thread-770 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,297 DEBUG [Thread-772 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c5c4716 to 127.0.0.1:61610 2024-12-06T10:17:46,297 DEBUG [Thread-772 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,299 DEBUG [Thread-766 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x433e2b26 to 127.0.0.1:61610 2024-12-06T10:17:46,299 DEBUG [Thread-766 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,299 DEBUG [Thread-768 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e3a4420 to 127.0.0.1:61610 2024-12-06T10:17:46,299 DEBUG [Thread-768 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:46,447 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/9e7577adbe564bf7acbe92a2200c48f4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/9e7577adbe564bf7acbe92a2200c48f4 2024-12-06T10:17:46,452 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/B of cba9fa4bb7ad0155608756e918c3bf01 into 9e7577adbe564bf7acbe92a2200c48f4(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:46,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:46,452 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/B, priority=13, startTime=1733480266017; duration=0sec 2024-12-06T10:17:46,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:46,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:B 2024-12-06T10:17:46,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:46,453 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:46,453 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): cba9fa4bb7ad0155608756e918c3bf01/C is initiating minor compaction (all files) 2024-12-06T10:17:46,453 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cba9fa4bb7ad0155608756e918c3bf01/C in TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:46,453 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/eed35fecf9414d81a832b19ecf1acd2f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f4791db864cc4a799a3d332c3e55af10, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/824f687360794fcd9bcb79b8588ef24b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp, totalSize=36.9 K 2024-12-06T10:17:46,454 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting eed35fecf9414d81a832b19ecf1acd2f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733480263673 2024-12-06T10:17:46,454 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f4791db864cc4a799a3d332c3e55af10, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733480264373 2024-12-06T10:17:46,454 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 824f687360794fcd9bcb79b8588ef24b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733480264499 2024-12-06T10:17:46,463 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cba9fa4bb7ad0155608756e918c3bf01#C#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:46,464 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/272d796f7ccd4ae4a734715294151331 is 50, key is test_row_0/C:col10/1733480264499/Put/seqid=0 2024-12-06T10:17:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742107_1283 (size=13289) 2024-12-06T10:17:46,479 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/272d796f7ccd4ae4a734715294151331 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/272d796f7ccd4ae4a734715294151331 2024-12-06T10:17:46,484 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/C of cba9fa4bb7ad0155608756e918c3bf01 into 272d796f7ccd4ae4a734715294151331(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:46,485 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:46,485 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/C, priority=13, startTime=1733480266017; duration=0sec 2024-12-06T10:17:46,485 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:46,485 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:C 2024-12-06T10:17:46,508 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/ed7f6648f9eb461f8ee49c8647936eae as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ed7f6648f9eb461f8ee49c8647936eae 2024-12-06T10:17:46,514 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cba9fa4bb7ad0155608756e918c3bf01/A of cba9fa4bb7ad0155608756e918c3bf01 into ed7f6648f9eb461f8ee49c8647936eae(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:46,514 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:46,514 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01., storeName=cba9fa4bb7ad0155608756e918c3bf01/A, priority=13, startTime=1733480266017; duration=0sec 2024-12-06T10:17:46,515 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:46,515 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cba9fa4bb7ad0155608756e918c3bf01:A 2024-12-06T10:17:46,529 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/87e373b242394b52a678243644391fcc 2024-12-06T10:17:46,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/4971f693e24d44bbb720575d1c8b3140 is 50, key is test_row_0/C:col10/1733480265174/Put/seqid=0 2024-12-06T10:17:46,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742108_1284 (size=12301) 2024-12-06T10:17:46,540 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/4971f693e24d44bbb720575d1c8b3140 2024-12-06T10:17:46,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/8c09a2471fcd43dcb99730b60f38e55b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/8c09a2471fcd43dcb99730b60f38e55b 2024-12-06T10:17:46,548 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/8c09a2471fcd43dcb99730b60f38e55b, entries=150, sequenceid=423, filesize=30.5 K 2024-12-06T10:17:46,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/87e373b242394b52a678243644391fcc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/87e373b242394b52a678243644391fcc 2024-12-06T10:17:46,552 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/87e373b242394b52a678243644391fcc, entries=150, sequenceid=423, filesize=12.0 K 2024-12-06T10:17:46,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/4971f693e24d44bbb720575d1c8b3140 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4971f693e24d44bbb720575d1c8b3140 2024-12-06T10:17:46,557 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4971f693e24d44bbb720575d1c8b3140, entries=150, sequenceid=423, filesize=12.0 K 2024-12-06T10:17:46,558 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=33.54 KB/34350 for cba9fa4bb7ad0155608756e918c3bf01 in 503ms, sequenceid=423, compaction requested=false 2024-12-06T10:17:46,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:46,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:46,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-06T10:17:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-06T10:17:46,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-06T10:17:46,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3570 sec 2024-12-06T10:17:46,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 2.3710 sec 2024-12-06T10:17:48,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-06T10:17:48,306 INFO [Thread-774 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5978 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5884 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2525 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7572 rows 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2526 2024-12-06T10:17:48,306 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7577 rows 2024-12-06T10:17:48,306 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:17:48,306 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ff872d8 to 127.0.0.1:61610 2024-12-06T10:17:48,306 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:17:48,308 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T10:17:48,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T10:17:48,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:48,312 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T10:17:48,313 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480268312"}]},"ts":"1733480268312"} 2024-12-06T10:17:48,314 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T10:17:48,316 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T10:17:48,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:17:48,318 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, UNASSIGN}] 2024-12-06T10:17:48,318 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, UNASSIGN 2024-12-06T10:17:48,319 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:48,320 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:17:48,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; CloseRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:17:48,360 DEBUG [master/552d6a33fa09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4d3009c066fbf23693b61104c76d0d3b changed from -1.0 to 0.0, refreshing cache 2024-12-06T10:17:48,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T10:17:48,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:48,471 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(124): Close cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:48,471 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:17:48,471 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1681): Closing cba9fa4bb7ad0155608756e918c3bf01, disabling compactions & flushes 2024-12-06T10:17:48,471 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 
2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. after waiting 0 ms 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:48,472 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(2837): Flushing cba9fa4bb7ad0155608756e918c3bf01 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=A 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=B 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cba9fa4bb7ad0155608756e918c3bf01, store=C 2024-12-06T10:17:48,472 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:48,479 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206547c825072f54ca8b859a97383ae2531_cba9fa4bb7ad0155608756e918c3bf01 is 50, key is test_row_1/A:col10/1733480266291/Put/seqid=0 2024-12-06T10:17:48,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742109_1285 (size=9914) 2024-12-06T10:17:48,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T10:17:48,883 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:48,887 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206547c825072f54ca8b859a97383ae2531_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206547c825072f54ca8b859a97383ae2531_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:48,888 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/ba68f630b13f4116a2662f74fba04ead, store: [table=TestAcidGuarantees family=A region=cba9fa4bb7ad0155608756e918c3bf01] 2024-12-06T10:17:48,889 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/ba68f630b13f4116a2662f74fba04ead is 175, key is test_row_1/A:col10/1733480266291/Put/seqid=0 2024-12-06T10:17:48,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742110_1286 (size=22561) 2024-12-06T10:17:48,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T10:17:49,293 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=434, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/ba68f630b13f4116a2662f74fba04ead 2024-12-06T10:17:49,301 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/7922ad9c5d0b4e8ca413cf01beaf39cf is 50, key is test_row_1/B:col10/1733480266291/Put/seqid=0 2024-12-06T10:17:49,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742111_1287 (size=9857) 2024-12-06T10:17:49,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T10:17:49,705 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/7922ad9c5d0b4e8ca413cf01beaf39cf 2024-12-06T10:17:49,712 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/af8c58ba2e3b4bffb56cea819e42b3ab is 50, key is test_row_1/C:col10/1733480266291/Put/seqid=0 2024-12-06T10:17:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742112_1288 (size=9857) 2024-12-06T10:17:50,116 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/af8c58ba2e3b4bffb56cea819e42b3ab 2024-12-06T10:17:50,120 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/A/ba68f630b13f4116a2662f74fba04ead as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ba68f630b13f4116a2662f74fba04ead 2024-12-06T10:17:50,124 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ba68f630b13f4116a2662f74fba04ead, entries=100, sequenceid=434, filesize=22.0 K 2024-12-06T10:17:50,125 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/B/7922ad9c5d0b4e8ca413cf01beaf39cf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/7922ad9c5d0b4e8ca413cf01beaf39cf 2024-12-06T10:17:50,128 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/7922ad9c5d0b4e8ca413cf01beaf39cf, entries=100, sequenceid=434, filesize=9.6 K 2024-12-06T10:17:50,129 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/.tmp/C/af8c58ba2e3b4bffb56cea819e42b3ab as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/af8c58ba2e3b4bffb56cea819e42b3ab 2024-12-06T10:17:50,132 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/af8c58ba2e3b4bffb56cea819e42b3ab, entries=100, sequenceid=434, filesize=9.6 K 2024-12-06T10:17:50,133 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for cba9fa4bb7ad0155608756e918c3bf01 in 1661ms, sequenceid=434, compaction requested=true 2024-12-06T10:17:50,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/064931b8b64440a5a32b69f20fb43a97, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7895548bed4c48acbb7b8368b602c642, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7699a17437c248c1b7fce57023e8ae33, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/bc8ae7dc610d4b2d8d534187ed89f250, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/f8ef894924c14f5bbc993c44512aa2bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0312b054b33a4e95b9b38e32ac15a467, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a02c466717394270807d8f402d1ba2a3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/1fbe33e68d2f48858d1b80ac3e9cb16e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e] to archive 2024-12-06T10:17:50,134 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T10:17:50,136 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/2b5b6b570a4f4312a0ccd0dbe302639d 2024-12-06T10:17:50,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d391df585f7140bd8830cc9e35b1ceb6 2024-12-06T10:17:50,138 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/997320fd15344913abb9d00b7c0ce718 2024-12-06T10:17:50,139 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/fd2a41dfc2d1455b98e91cad56cf20c0 2024-12-06T10:17:50,139 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/064931b8b64440a5a32b69f20fb43a97 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/064931b8b64440a5a32b69f20fb43a97 2024-12-06T10:17:50,140 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a2b5f018e978499db43971231708c209 2024-12-06T10:17:50,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/672acde59450412cb03bf21f25565d91 2024-12-06T10:17:50,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/939ada9d144b44088aef2c15bc5ee6a9 2024-12-06T10:17:50,143 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7895548bed4c48acbb7b8368b602c642 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7895548bed4c48acbb7b8368b602c642 2024-12-06T10:17:50,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7813153bf789489d9e168fc5e3dcece1 2024-12-06T10:17:50,145 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7699a17437c248c1b7fce57023e8ae33 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7699a17437c248c1b7fce57023e8ae33 2024-12-06T10:17:50,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0f6388d96f4542a5b8c27049dc9e4aaa 2024-12-06T10:17:50,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d578d4c3d0d24f3b876898bd5904bcda 2024-12-06T10:17:50,148 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/bc8ae7dc610d4b2d8d534187ed89f250 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/bc8ae7dc610d4b2d8d534187ed89f250 2024-12-06T10:17:50,149 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/6a273b33fe5a490f9007c4b1f7e299d7 2024-12-06T10:17:50,150 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c201f29c244c45fd99554f2be8ff45bc 2024-12-06T10:17:50,151 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d6f6a7ca88ea4495a9cbdd21fc7bca39 2024-12-06T10:17:50,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/f8ef894924c14f5bbc993c44512aa2bb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/f8ef894924c14f5bbc993c44512aa2bb 2024-12-06T10:17:50,153 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/143a0bccaa4e4452b2b8336368a90149 2024-12-06T10:17:50,154 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0312b054b33a4e95b9b38e32ac15a467 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/0312b054b33a4e95b9b38e32ac15a467 2024-12-06T10:17:50,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/d339be6ae5da4a43811ec2af2806d36c 2024-12-06T10:17:50,156 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/67414dbe47e8495f9265a61612cb1fbf 2024-12-06T10:17:50,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/7703aaa26c9740eea92666e564fa1f48 2024-12-06T10:17:50,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a02c466717394270807d8f402d1ba2a3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/a02c466717394270807d8f402d1ba2a3 2024-12-06T10:17:50,158 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c30b0dae05bc43dfb38abc21218cad7b 2024-12-06T10:17:50,159 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/b9614d9f3b0e47fc8ac1ef2d49fd32d0 2024-12-06T10:17:50,160 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/1fbe33e68d2f48858d1b80ac3e9cb16e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/1fbe33e68d2f48858d1b80ac3e9cb16e 2024-12-06T10:17:50,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/c5590d8e154245279e74d0f0afe2d0c2 2024-12-06T10:17:50,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/481ec626f7ef4ea99031d4ef80f51b4e 2024-12-06T10:17:50,163 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/cce5f249efc844d19596f4d099c1497c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d1eee93923bf49c2a22b8e73597c594e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/76cc0e0d7f504dfcbd00ff624de9d565, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/54fe9dde453d4b2ca2b7257384f16459, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/75dee3f4c0164e59aac118b2181f301f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/73f15746812c411cb342f806e3554220, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/61f56e64c7cd4182adb4b6a0c122e7b3, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6416d8f693154d1ca2261afad97bc58d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/4642c961777f48ed80f7e220e02936d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/e50f4765b8634e0c9e29c8f6746b3187, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/ecdd551d849c4eef91237be3d0585d05, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d7b3425a23bb40b586ea3832c9aec931, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b260896510a44fa5a5aec4232637e3ce, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f86afad75dff4bbc8537aa2bf3f6baed, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/50ec457f2e8b41489055ad088bdf0737, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/200d4195e41746878612d51bb14f49f2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/a631ae2750414318af30c4bee5c4cb4f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/320c23b6ccc04d07b04f0ca0e67df7b2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/5446ed762a054e5ea7c2051ba8781db9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/558cc231625943629b43e6bddf10ca39, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6ed9a1d06f1144b9b703079c14e8971b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/60b86089fe1f455c95b26aab7a65b418, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f1f4ede83afe4410ab17ed54b99bde65, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/3d3f7aa0a66443e2afe702d99aacb102, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/82ea790d3a56430c9afefe432a9d08f2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/902f66c01a8842629f3bd2c06c1f19a6, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b40c78e9ebac40e0ab4e47112d9e5e91, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/28d3b86d569647448ffe3ce308a44d58, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6c1e24b6e5de448b9a6b3106e72ecfea] to archive 2024-12-06T10:17:50,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:17:50,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/cce5f249efc844d19596f4d099c1497c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/cce5f249efc844d19596f4d099c1497c 2024-12-06T10:17:50,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d1eee93923bf49c2a22b8e73597c594e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d1eee93923bf49c2a22b8e73597c594e 2024-12-06T10:17:50,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/76cc0e0d7f504dfcbd00ff624de9d565 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/76cc0e0d7f504dfcbd00ff624de9d565 2024-12-06T10:17:50,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/54fe9dde453d4b2ca2b7257384f16459 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/54fe9dde453d4b2ca2b7257384f16459 2024-12-06T10:17:50,169 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/75dee3f4c0164e59aac118b2181f301f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/75dee3f4c0164e59aac118b2181f301f 2024-12-06T10:17:50,170 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/73f15746812c411cb342f806e3554220 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/73f15746812c411cb342f806e3554220 2024-12-06T10:17:50,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/61f56e64c7cd4182adb4b6a0c122e7b3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/61f56e64c7cd4182adb4b6a0c122e7b3 2024-12-06T10:17:50,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6416d8f693154d1ca2261afad97bc58d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6416d8f693154d1ca2261afad97bc58d 2024-12-06T10:17:50,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/4642c961777f48ed80f7e220e02936d9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/4642c961777f48ed80f7e220e02936d9 2024-12-06T10:17:50,173 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/e50f4765b8634e0c9e29c8f6746b3187 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/e50f4765b8634e0c9e29c8f6746b3187 2024-12-06T10:17:50,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/ecdd551d849c4eef91237be3d0585d05 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/ecdd551d849c4eef91237be3d0585d05 2024-12-06T10:17:50,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d7b3425a23bb40b586ea3832c9aec931 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/d7b3425a23bb40b586ea3832c9aec931 2024-12-06T10:17:50,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b260896510a44fa5a5aec4232637e3ce to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b260896510a44fa5a5aec4232637e3ce 2024-12-06T10:17:50,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f86afad75dff4bbc8537aa2bf3f6baed to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f86afad75dff4bbc8537aa2bf3f6baed 2024-12-06T10:17:50,178 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/50ec457f2e8b41489055ad088bdf0737 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/50ec457f2e8b41489055ad088bdf0737 2024-12-06T10:17:50,179 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/200d4195e41746878612d51bb14f49f2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/200d4195e41746878612d51bb14f49f2 2024-12-06T10:17:50,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/a631ae2750414318af30c4bee5c4cb4f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/a631ae2750414318af30c4bee5c4cb4f 2024-12-06T10:17:50,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/320c23b6ccc04d07b04f0ca0e67df7b2 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/320c23b6ccc04d07b04f0ca0e67df7b2 2024-12-06T10:17:50,182 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/5446ed762a054e5ea7c2051ba8781db9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/5446ed762a054e5ea7c2051ba8781db9 2024-12-06T10:17:50,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/558cc231625943629b43e6bddf10ca39 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/558cc231625943629b43e6bddf10ca39 2024-12-06T10:17:50,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6ed9a1d06f1144b9b703079c14e8971b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6ed9a1d06f1144b9b703079c14e8971b 2024-12-06T10:17:50,185 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/60b86089fe1f455c95b26aab7a65b418 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/60b86089fe1f455c95b26aab7a65b418 2024-12-06T10:17:50,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f1f4ede83afe4410ab17ed54b99bde65 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/f1f4ede83afe4410ab17ed54b99bde65 2024-12-06T10:17:50,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/3d3f7aa0a66443e2afe702d99aacb102 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/3d3f7aa0a66443e2afe702d99aacb102 2024-12-06T10:17:50,187 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/82ea790d3a56430c9afefe432a9d08f2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/82ea790d3a56430c9afefe432a9d08f2 2024-12-06T10:17:50,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/902f66c01a8842629f3bd2c06c1f19a6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/902f66c01a8842629f3bd2c06c1f19a6 2024-12-06T10:17:50,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b40c78e9ebac40e0ab4e47112d9e5e91 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/b40c78e9ebac40e0ab4e47112d9e5e91 2024-12-06T10:17:50,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/28d3b86d569647448ffe3ce308a44d58 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/28d3b86d569647448ffe3ce308a44d58 2024-12-06T10:17:50,191 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6c1e24b6e5de448b9a6b3106e72ecfea to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/6c1e24b6e5de448b9a6b3106e72ecfea 2024-12-06T10:17:50,192 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/e1abb72ba51d40f2a3505efdad69b47c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/03dd1168286845ddb6abbe10904e3e5a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/aff476f723de4555845a28597189711e, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b482c3d0b7554e35996b8da20c4dce86, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/c1c52fe299344af2b18c548144278bfc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/042d1fb260e741b4a1999ed5e135fb2c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/08b016b343d2406d9172dfff65942a9f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2fc8d4f88e6441179a740b2fd7cabfe9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/64030262cb0e4f13892061894630224f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/dc3d1cc317ae41fb96fa0119cfed8cc3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/fb88e4b85e2947cbb307758b80d36890, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/52f64a40c89d4c5492673cf0f0c7c247, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/098cd0e195024eeda001cbb81079ff8c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/426728a358734bdf8fcb3b20f536e6b2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2e78db90320c4cfa88e1cc2e912533dd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/064a9cca4f9642d080ddc1c3b6ea6aa3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/5c37337a8c944e2393acb65e187c697f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4b9ddee9ebed4d96b6d56c51cb01dbf0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/75fc84dcd0f14ac89a26b1757d99d447, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/0a940abecb6244f1bec238f03001a345, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b90b1ea1929546c5b7c82b7dc942fe2e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/ee4db79eb349470e8ab624beaeefc158, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/8483621323574f5791e8ec9e9f578611, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/279de041765b472281ca315e158cf918, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f942246d18f1462a9530ef9aca33f078, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/eed35fecf9414d81a832b19ecf1acd2f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/994c2f14d02f42bfa81e25d6d3672ff5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f4791db864cc4a799a3d332c3e55af10, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/824f687360794fcd9bcb79b8588ef24b] to archive 2024-12-06T10:17:50,193 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:17:50,194 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/e1abb72ba51d40f2a3505efdad69b47c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/e1abb72ba51d40f2a3505efdad69b47c 2024-12-06T10:17:50,195 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/03dd1168286845ddb6abbe10904e3e5a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/03dd1168286845ddb6abbe10904e3e5a 2024-12-06T10:17:50,196 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/aff476f723de4555845a28597189711e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/aff476f723de4555845a28597189711e 2024-12-06T10:17:50,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b482c3d0b7554e35996b8da20c4dce86 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b482c3d0b7554e35996b8da20c4dce86 2024-12-06T10:17:50,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/c1c52fe299344af2b18c548144278bfc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/c1c52fe299344af2b18c548144278bfc 2024-12-06T10:17:50,198 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/042d1fb260e741b4a1999ed5e135fb2c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/042d1fb260e741b4a1999ed5e135fb2c 2024-12-06T10:17:50,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/08b016b343d2406d9172dfff65942a9f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/08b016b343d2406d9172dfff65942a9f 2024-12-06T10:17:50,200 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2fc8d4f88e6441179a740b2fd7cabfe9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2fc8d4f88e6441179a740b2fd7cabfe9 2024-12-06T10:17:50,201 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/64030262cb0e4f13892061894630224f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/64030262cb0e4f13892061894630224f 2024-12-06T10:17:50,202 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/dc3d1cc317ae41fb96fa0119cfed8cc3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/dc3d1cc317ae41fb96fa0119cfed8cc3 2024-12-06T10:17:50,203 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/fb88e4b85e2947cbb307758b80d36890 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/fb88e4b85e2947cbb307758b80d36890 2024-12-06T10:17:50,204 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/52f64a40c89d4c5492673cf0f0c7c247 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/52f64a40c89d4c5492673cf0f0c7c247 2024-12-06T10:17:50,204 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/098cd0e195024eeda001cbb81079ff8c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/098cd0e195024eeda001cbb81079ff8c 2024-12-06T10:17:50,206 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/426728a358734bdf8fcb3b20f536e6b2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/426728a358734bdf8fcb3b20f536e6b2 2024-12-06T10:17:50,206 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2e78db90320c4cfa88e1cc2e912533dd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/2e78db90320c4cfa88e1cc2e912533dd 2024-12-06T10:17:50,207 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/064a9cca4f9642d080ddc1c3b6ea6aa3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/064a9cca4f9642d080ddc1c3b6ea6aa3 2024-12-06T10:17:50,208 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/5c37337a8c944e2393acb65e187c697f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/5c37337a8c944e2393acb65e187c697f 2024-12-06T10:17:50,209 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4b9ddee9ebed4d96b6d56c51cb01dbf0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4b9ddee9ebed4d96b6d56c51cb01dbf0 2024-12-06T10:17:50,210 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/75fc84dcd0f14ac89a26b1757d99d447 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/75fc84dcd0f14ac89a26b1757d99d447 2024-12-06T10:17:50,210 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/0a940abecb6244f1bec238f03001a345 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/0a940abecb6244f1bec238f03001a345 2024-12-06T10:17:50,211 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b90b1ea1929546c5b7c82b7dc942fe2e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/b90b1ea1929546c5b7c82b7dc942fe2e 2024-12-06T10:17:50,212 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/ee4db79eb349470e8ab624beaeefc158 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/ee4db79eb349470e8ab624beaeefc158 2024-12-06T10:17:50,213 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/8483621323574f5791e8ec9e9f578611 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/8483621323574f5791e8ec9e9f578611 2024-12-06T10:17:50,214 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/279de041765b472281ca315e158cf918 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/279de041765b472281ca315e158cf918 2024-12-06T10:17:50,215 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f942246d18f1462a9530ef9aca33f078 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f942246d18f1462a9530ef9aca33f078 2024-12-06T10:17:50,215 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/eed35fecf9414d81a832b19ecf1acd2f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/eed35fecf9414d81a832b19ecf1acd2f 2024-12-06T10:17:50,216 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/994c2f14d02f42bfa81e25d6d3672ff5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/994c2f14d02f42bfa81e25d6d3672ff5 2024-12-06T10:17:50,217 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f4791db864cc4a799a3d332c3e55af10 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/f4791db864cc4a799a3d332c3e55af10 2024-12-06T10:17:50,218 DEBUG [StoreCloser-TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/824f687360794fcd9bcb79b8588ef24b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/824f687360794fcd9bcb79b8588ef24b 2024-12-06T10:17:50,222 DEBUG 
[RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/recovered.edits/437.seqid, newMaxSeqId=437, maxSeqId=4 2024-12-06T10:17:50,223 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01. 2024-12-06T10:17:50,223 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1635): Region close journal for cba9fa4bb7ad0155608756e918c3bf01: 2024-12-06T10:17:50,224 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(170): Closed cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,225 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=cba9fa4bb7ad0155608756e918c3bf01, regionState=CLOSED 2024-12-06T10:17:50,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-06T10:17:50,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; CloseRegionProcedure cba9fa4bb7ad0155608756e918c3bf01, server=552d6a33fa09,33397,1733480204743 in 1.9050 sec 2024-12-06T10:17:50,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-06T10:17:50,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cba9fa4bb7ad0155608756e918c3bf01, UNASSIGN in 1.9080 sec 2024-12-06T10:17:50,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-06T10:17:50,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9110 sec 2024-12-06T10:17:50,230 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480270230"}]},"ts":"1733480270230"} 2024-12-06T10:17:50,230 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T10:17:50,232 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T10:17:50,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9240 sec 2024-12-06T10:17:50,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T10:17:50,416 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-06T10:17:50,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T10:17:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,418 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T10:17:50,419 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=71, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,420 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,423 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/recovered.edits] 2024-12-06T10:17:50,426 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/8c09a2471fcd43dcb99730b60f38e55b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/8c09a2471fcd43dcb99730b60f38e55b 2024-12-06T10:17:50,428 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ba68f630b13f4116a2662f74fba04ead to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ba68f630b13f4116a2662f74fba04ead 2024-12-06T10:17:50,429 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ed7f6648f9eb461f8ee49c8647936eae to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/A/ed7f6648f9eb461f8ee49c8647936eae 2024-12-06T10:17:50,432 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/7922ad9c5d0b4e8ca413cf01beaf39cf to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/7922ad9c5d0b4e8ca413cf01beaf39cf 2024-12-06T10:17:50,433 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/87e373b242394b52a678243644391fcc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/87e373b242394b52a678243644391fcc 2024-12-06T10:17:50,435 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/9e7577adbe564bf7acbe92a2200c48f4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/B/9e7577adbe564bf7acbe92a2200c48f4 2024-12-06T10:17:50,438 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/272d796f7ccd4ae4a734715294151331 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/272d796f7ccd4ae4a734715294151331 2024-12-06T10:17:50,440 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4971f693e24d44bbb720575d1c8b3140 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/4971f693e24d44bbb720575d1c8b3140 2024-12-06T10:17:50,441 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/af8c58ba2e3b4bffb56cea819e42b3ab to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/C/af8c58ba2e3b4bffb56cea819e42b3ab 2024-12-06T10:17:50,444 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/recovered.edits/437.seqid to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01/recovered.edits/437.seqid 2024-12-06T10:17:50,445 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,445 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T10:17:50,445 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T10:17:50,446 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-06T10:17:50,451 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060006e8ac5913438eae6b6ff775b4ca1b_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060006e8ac5913438eae6b6ff775b4ca1b_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,453 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060f002c125a74459ab1654b64a76ecadb_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060f002c125a74459ab1654b64a76ecadb_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,454 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206127708b0941e4f8cb35a60ab40a08648_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206127708b0941e4f8cb35a60ab40a08648_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,455 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063e48cb394821491ca2297c15c4f9b28e_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063e48cb394821491ca2297c15c4f9b28e_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,457 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206409da238d8b04f488aee351cb5aa6add_cba9fa4bb7ad0155608756e918c3bf01 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206409da238d8b04f488aee351cb5aa6add_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,459 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120640a8491bfc2f46a68383014ca8304ae7_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120640a8491bfc2f46a68383014ca8304ae7_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,461 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206442b6c7eeaf24942bfc5cb5510f6afac_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206442b6c7eeaf24942bfc5cb5510f6afac_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,463 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412064d0e44050701487eb9bb00a7015ca838_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412064d0e44050701487eb9bb00a7015ca838_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,464 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206547c825072f54ca8b859a97383ae2531_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206547c825072f54ca8b859a97383ae2531_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,465 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067dc26658fb9b463da0fe0f762f3d5751_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067dc26658fb9b463da0fe0f762f3d5751_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,466 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120680a510d9689b4f8093bdb169f48e919a_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120680a510d9689b4f8093bdb169f48e919a_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,468 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068818f01578c44474b25db886c4dcbebe_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068818f01578c44474b25db886c4dcbebe_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,469 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120689b7355a11a342f087ec0a9e22d75e79_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120689b7355a11a342f087ec0a9e22d75e79_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,470 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068a4474191c7e4a1d8f1de21f51977996_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068a4474191c7e4a1d8f1de21f51977996_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,471 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068f178dc2f64043fb9ca91273c82cbfcf_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068f178dc2f64043fb9ca91273c82cbfcf_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,472 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069a6dd467b2fd4ae08fcdf57ff74497eb_cba9fa4bb7ad0155608756e918c3bf01 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069a6dd467b2fd4ae08fcdf57ff74497eb_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,473 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069efecd0530234cf497666cd6da7256fd_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069efecd0530234cf497666cd6da7256fd_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,474 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a8397f0145db4a779de4396e40d66477_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a8397f0145db4a779de4396e40d66477_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,475 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ae19fdeb217d4b73a8b3cf5b27c12b7d_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ae19fdeb217d4b73a8b3cf5b27c12b7d_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,476 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b76df7908c8e40a9a15fad81a10e27bc_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b76df7908c8e40a9a15fad81a10e27bc_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,477 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e113df2ee3c24b84ad1863ba26c42d71_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e113df2ee3c24b84ad1863ba26c42d71_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,478 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e43f76e2dee144fd98e9f8b53f2db1a1_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e43f76e2dee144fd98e9f8b53f2db1a1_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,479 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206f7776cb82c8149999814a60beab96a9b_cba9fa4bb7ad0155608756e918c3bf01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206f7776cb82c8149999814a60beab96a9b_cba9fa4bb7ad0155608756e918c3bf01 2024-12-06T10:17:50,480 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T10:17:50,481 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=71, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,484 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T10:17:50,486 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T10:17:50,486 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=71, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,486 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T10:17:50,487 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733480270486"}]},"ts":"9223372036854775807"} 2024-12-06T10:17:50,488 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T10:17:50,488 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cba9fa4bb7ad0155608756e918c3bf01, NAME => 'TestAcidGuarantees,,1733480242870.cba9fa4bb7ad0155608756e918c3bf01.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T10:17:50,488 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
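Note on the sequence above: HFileArchiver moves the MOB store files into the archive directory and DeleteTableProcedure pid=71 then scrubs hbase:meta, which is what the master does once a client asks for the table to be dropped. A minimal client-side sketch of that request, assuming an already-configured Connection and the table name from the log (this is illustrative only, not the test's own code):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn); // regions are unassigned before deletion
            }
            admin.deleteTable(tn);    // master runs a DeleteTableProcedure, as in the log
          }
        }
      }
    }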
2024-12-06T10:17:50,489 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733480270488"}]},"ts":"9223372036854775807"} 2024-12-06T10:17:50,491 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T10:17:50,493 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=71, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 76 msec 2024-12-06T10:17:50,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T10:17:50,519 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-06T10:17:50,528 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=241 (was 238) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1982605922_22 at /127.0.0.1:55138 [Waiting for operation #484] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1360265992_22 at /127.0.0.1:55346 [Waiting for operation #880] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1982605922_22 at /127.0.0.1:37436 [Waiting for operation #1071] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1360265992_22 at 
/127.0.0.1:32870 [Waiting for operation #1021] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x68a406ef-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=463 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=382 (was 316) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6263 (was 6600) 2024-12-06T10:17:50,536 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=241, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=382, ProcessCount=11, AvailableMemoryMB=6262 2024-12-06T10:17:50,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T10:17:50,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:17:50,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T10:17:50,539 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:17:50,540 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:50,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 72 2024-12-06T10:17:50,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-06T10:17:50,540 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:17:50,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742113_1289 (size=963) 2024-12-06T10:17:50,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-06T10:17:50,842 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-06T10:17:50,947 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:17:50,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742114_1290 (size=53) 2024-12-06T10:17:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-06T10:17:51,353 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:17:51,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d2fcaf53b78370d16571065501f9880b, disabling compactions & flushes 2024-12-06T10:17:51,354 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:51,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:51,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. after waiting 0 ms 2024-12-06T10:17:51,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:51,354 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
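The create request logged above spells out the full table descriptor: a table-level attribute selecting the ADAPTIVE compacting memstore, the small MEMSTORE_FLUSHSIZE that trips the TableDescriptorChecker warning, and three column families A, B and C with a single version and 64 KB blocks. A sketch of building such a descriptor with the 2.x client API (values are copied from the log; treat the surrounding scaffolding as an assumption, not the test's code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder b = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata from the log: adaptive in-memory compaction
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              // the deliberately small flush size flagged by TableDescriptorChecker
              .setMemStoreFlushSize(131072L);
          for (String family : new String[] { "A", "B", "C" }) {
            b.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1'
                .setBlocksize(65536) // BLOCKSIZE => 64 KB
                .build());
          }
          TableDescriptor td = b.build();
          admin.createTable(td);     // stored as a CreateTableProcedure, like pid=72 above
        }
      }
    }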
2024-12-06T10:17:51,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:51,355 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:17:51,355 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733480271355"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480271355"}]},"ts":"1733480271355"} 2024-12-06T10:17:51,356 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:17:51,356 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:17:51,357 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480271357"}]},"ts":"1733480271357"} 2024-12-06T10:17:51,357 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T10:17:51,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, ASSIGN}] 2024-12-06T10:17:51,362 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, ASSIGN 2024-12-06T10:17:51,363 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:17:51,513 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=d2fcaf53b78370d16571065501f9880b, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:51,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; OpenRegionProcedure d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:17:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-06T10:17:51,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:51,669 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
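While the master works through CREATE_TABLE_ADD_TO_META and region assignment, the RPC handler keeps polling "Checking to see if procedure is done pid=72". From the client side the same wait can be expressed as a poll on table availability; a hedged sketch, with the timeout and sleep values chosen arbitrarily:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WaitForTableSketch {
      private WaitForTableSketch() {}

      /** Polls until all regions of the table are assigned or the deadline passes. */
      static void waitUntilAvailable(Admin admin, TableName tn, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(tn)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("Table " + tn + " not available after " + timeoutMs + " ms");
          }
          Thread.sleep(100); // roughly mirrors the repeated "procedure done?" RPCs in the log
        }
      }
    }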
2024-12-06T10:17:51,669 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7285): Opening region: {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:17:51,669 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,669 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:17:51,670 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7327): checking encryption for d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,670 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7330): checking classloading for d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,672 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,673 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:51,673 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2fcaf53b78370d16571065501f9880b columnFamilyName A 2024-12-06T10:17:51,673 DEBUG [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:51,674 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.HStore(327): Store=d2fcaf53b78370d16571065501f9880b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:51,674 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,675 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:51,675 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2fcaf53b78370d16571065501f9880b columnFamilyName B 2024-12-06T10:17:51,675 DEBUG [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:51,676 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.HStore(327): Store=d2fcaf53b78370d16571065501f9880b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:51,676 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,677 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:17:51,677 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2fcaf53b78370d16571065501f9880b columnFamilyName C 2024-12-06T10:17:51,677 DEBUG [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:17:51,678 INFO [StoreOpener-d2fcaf53b78370d16571065501f9880b-1 {}] regionserver.HStore(327): Store=d2fcaf53b78370d16571065501f9880b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:17:51,678 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:51,678 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,679 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,680 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:17:51,682 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1085): writing seq id for d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:51,692 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:17:51,696 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1102): Opened d2fcaf53b78370d16571065501f9880b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65006565, jitterRate=-0.031326696276664734}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:17:51,697 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1001): Region open journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:51,698 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., pid=74, masterSystemTime=1733480271666 2024-12-06T10:17:51,709 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:51,709 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
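At this point the region server has opened the region with its three CompactingMemStore-backed stores and reported it back to the master, which records it OPEN on 552d6a33fa09,33397. A client can observe the same placement through the RegionLocator API; a minimal sketch, assuming an existing Connection (not part of the test itself):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionLocationSketch {
      private RegionLocationSketch() {}

      static void printLocations(Connection conn) throws IOException {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // For a single-region table this prints one line with the encoded region
            // name and the host/port that appears as regionLocation in the log.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }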
2024-12-06T10:17:51,709 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=d2fcaf53b78370d16571065501f9880b, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:51,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-06T10:17:51,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; OpenRegionProcedure d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 in 196 msec 2024-12-06T10:17:51,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=72 2024-12-06T10:17:51,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=72, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, ASSIGN in 351 msec 2024-12-06T10:17:51,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:17:51,714 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480271714"}]},"ts":"1733480271714"} 2024-12-06T10:17:51,715 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T10:17:51,718 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:17:51,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1800 sec 2024-12-06T10:17:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-06T10:17:52,645 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 72 completed 2024-12-06T10:17:52,646 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58341641 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17b6adc5 2024-12-06T10:17:52,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a569490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,652 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,653 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42414, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,654 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:17:52,655 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49620, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:17:52,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-12-06T10:17:52,661 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,662 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-12-06T10:17:52,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,670 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-12-06T10:17:52,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,675 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-12-06T10:17:52,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-12-06T10:17:52,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,684 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-12-06T10:17:52,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,688 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-12-06T10:17:52,690 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,691 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-12-06T10:17:52,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-12-06T10:17:52,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,700 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d1403c3 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328852db 2024-12-06T10:17:52,703 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1730a60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:17:52,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:52,707 DEBUG [hconnection-0xbe116ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-06T10:17:52,708 DEBUG [hconnection-0x62ae779d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,708 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
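Each ReadOnlyZKClient line above is a new client connection to the ZooKeeper quorum at 127.0.0.1:61610, and the master then receives an explicit flush of TestAcidGuarantees from the jenkins client. A sketch of the equivalent client calls, assuming the quorum address and session timeout shown in the log (in the test these come from the mini-cluster configuration, not from code like this):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and session timeout as they appear in the ReadOnlyZKClient log lines.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:61610");
        conf.setInt("zookeeper.session.timeout", 90000);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Triggers a FlushTableProcedure on the master, like pid=75 in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }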
2024-12-06T10:17:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:52,709 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:52,709 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,710 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:52,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:52,716 DEBUG [hconnection-0x324507ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,717 DEBUG [hconnection-0x556e48b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,717 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,718 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,719 DEBUG [hconnection-0x9a747c4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,720 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,728 DEBUG [hconnection-0x573dc4a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:52,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:17:52,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:52,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:52,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:52,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:52,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:52,729 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:52,729 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,729 DEBUG [hconnection-0x67bf82a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,732 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/2d68d28dbd6949318fca792f0e19ddca is 50, key is test_row_0/A:col10/1733480272727/Put/seqid=0 2024-12-06T10:17:52,763 DEBUG [hconnection-0x4fece3f0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,765 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480332764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480332765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480332767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480332767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,780 DEBUG [hconnection-0x46311e1d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,781 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,800 DEBUG [hconnection-0x11eda4f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:17:52,802 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:17:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:52,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480332813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742115_1291 (size=12001) 2024-12-06T10:17:52,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/2d68d28dbd6949318fca792f0e19ddca 2024-12-06T10:17:52,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/0b25f74849814bc48784f8fdd481131b is 50, key is test_row_0/B:col10/1733480272727/Put/seqid=0 2024-12-06T10:17:52,867 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:52,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:52,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:52,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
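The repeated RegionTooBusyException warnings above show the region server rejecting ClientService Mutate calls for region d2fcaf53b78370d16571065501f9880b while its memstore sits above the blocking threshold and a flush is still in flight; the concurrently dispatched FlushRegionProcedure (pid=76) is likewise turned away with "NOT flushing ... as already flushing" and redispatched by the master until the in-progress flush completes. The 512.0 K blocking limit is the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; such a small value suggests this test run overrides the 128 MB default flush size at the table level, which is an inference from the log rather than something it states. A minimal sketch of how those two settings combine (the configuration keys are real HBase properties; the class name and printout are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    // Load hbase-site.xml / built-in defaults from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB) and the multiplier that sets the
    // point at which writes are rejected with RegionTooBusyException (default 4).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * blockMultiplier;
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}

Clients are expected to treat RegionTooBusyException as retryable and back off until the flush, recorded further down as "Finished flush of dataSize ~60.38 KB ... in 668ms", drains the memstore; raising the multiplier or the flush size trades memory for fewer write stalls.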
2024-12-06T10:17:52,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:52,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:52,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:52,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480332870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480332870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480332871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480332872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742116_1292 (size=12001) 2024-12-06T10:17:52,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/0b25f74849814bc48784f8fdd481131b 2024-12-06T10:17:52,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480332917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:52,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/04d5bcc116024343a214486e6cd1ab35 is 50, key is test_row_0/C:col10/1733480272727/Put/seqid=0 2024-12-06T10:17:52,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742117_1293 (size=12001) 2024-12-06T10:17:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:53,021 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480333073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480333076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480333076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480333076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480333121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,186 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:53,339 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/04d5bcc116024343a214486e6cd1ab35 2024-12-06T10:17:53,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480333377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/2d68d28dbd6949318fca792f0e19ddca as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2d68d28dbd6949318fca792f0e19ddca 2024-12-06T10:17:53,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480333380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480333380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480333381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2d68d28dbd6949318fca792f0e19ddca, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T10:17:53,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/0b25f74849814bc48784f8fdd481131b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/0b25f74849814bc48784f8fdd481131b 2024-12-06T10:17:53,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/0b25f74849814bc48784f8fdd481131b, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T10:17:53,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/04d5bcc116024343a214486e6cd1ab35 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/04d5bcc116024343a214486e6cd1ab35 2024-12-06T10:17:53,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/04d5bcc116024343a214486e6cd1ab35, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T10:17:53,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d2fcaf53b78370d16571065501f9880b in 668ms, sequenceid=13, compaction requested=false 2024-12-06T10:17:53,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:53,428 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:17:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:53,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:53,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:53,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:53,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:53,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:53,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/467650f799504aef96acc78c92d527f1 is 50, key is test_row_0/A:col10/1733480272762/Put/seqid=0 2024-12-06T10:17:53,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742118_1294 (size=12001) 2024-12-06T10:17:53,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/467650f799504aef96acc78c92d527f1 2024-12-06T10:17:53,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480333452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c9a39067726d44c696cab789b22d20b7 is 50, key is test_row_0/B:col10/1733480272762/Put/seqid=0 2024-12-06T10:17:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742119_1295 (size=12001) 2024-12-06T10:17:53,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c9a39067726d44c696cab789b22d20b7 2024-12-06T10:17:53,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/818d811852df485ea360cb1a7a946468 is 50, key is test_row_0/C:col10/1733480272762/Put/seqid=0 2024-12-06T10:17:53,493 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
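
The Mutate calls above are being rejected with RegionTooBusyException while MemStoreFlusher.0 drains the region's memstore; the HBase client normally absorbs these rejections through its own retry policy. A minimal hand-rolled sketch of that contract is below, assuming a plain Table writer against the TestAcidGuarantees table; the backoff values and the cell value are illustrative assumptions, and in a real client the exception may arrive wrapped by the retry layer rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;                 // assumed starting backoff, not taken from the log
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                    // the server may reject this with RegionTooBusyException
              break;                             // write accepted
            } catch (RegionTooBusyException busy) {
              Thread.sleep(backoffMs);           // let the memstore flush catch up
              backoffMs *= 2;                    // simple exponential backoff
            }
          }
        }
      }
    }
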
2024-12-06T10:17:53,494 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742120_1296 (size=12001) 2024-12-06T10:17:53,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/818d811852df485ea360cb1a7a946468 2024-12-06T10:17:53,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/467650f799504aef96acc78c92d527f1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/467650f799504aef96acc78c92d527f1 2024-12-06T10:17:53,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/467650f799504aef96acc78c92d527f1, entries=150, sequenceid=39, filesize=11.7 K 2024-12-06T10:17:53,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c9a39067726d44c696cab789b22d20b7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c9a39067726d44c696cab789b22d20b7 2024-12-06T10:17:53,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c9a39067726d44c696cab789b22d20b7, entries=150, sequenceid=39, filesize=11.7 K 2024-12-06T10:17:53,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/818d811852df485ea360cb1a7a946468 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/818d811852df485ea360cb1a7a946468 2024-12-06T10:17:53,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/818d811852df485ea360cb1a7a946468, entries=150, sequenceid=39, filesize=11.7 K 2024-12-06T10:17:53,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2fcaf53b78370d16571065501f9880b in 107ms, sequenceid=39, compaction requested=false 2024-12-06T10:17:53,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:53,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:53,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:53,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:53,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:53,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:53,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:53,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:53,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:53,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/7888a79dcea74802ba0695d1927ced71 is 50, key is test_row_0/A:col10/1733480273443/Put/seqid=0 2024-12-06T10:17:53,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742121_1297 (size=14341) 2024-12-06T10:17:53,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/7888a79dcea74802ba0695d1927ced71 2024-12-06T10:17:53,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/91120bcc98a244bda205ba8c69b31bb3 is 50, key is test_row_0/B:col10/1733480273443/Put/seqid=0 2024-12-06T10:17:53,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742122_1298 (size=12001) 2024-12-06T10:17:53,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/91120bcc98a244bda205ba8c69b31bb3 2024-12-06T10:17:53,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/6a32c6c29a1d403393220e0da06cbf6b is 50, key is test_row_0/C:col10/1733480273443/Put/seqid=0 2024-12-06T10:17:53,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480333624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742123_1299 (size=12001) 2024-12-06T10:17:53,646 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
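
The repeated "Over memstore limit=512.0 K" figure is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; once a region's memstore exceeds that product, checkResources() rejects further puts with RegionTooBusyException until a flush completes. The exact settings this test run uses are not visible in the log, so the sketch below picks hypothetical values whose product is 512 KB purely to make the arithmetic concrete.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values; the test's actual configuration is not shown in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush threshold
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block updates at 4x the flush size

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // With the assumed values above this prints 524288 bytes, i.e. the 512.0 K limit in the WARNs.
        System.out.println("Blocking memstore limit = " + (flushSize * multiplier) + " bytes");
      }
    }
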
2024-12-06T10:17:53,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480333728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,799 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
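
The same pid=76 flush procedure keeps being re-dispatched by RSProcedureDispatcher and rejected with "NOT flushing ... as already flushing", so the master logs "Remote procedure failed" and retries until the in-progress flush finishes. An explicit flush request of the kind that produces such a procedure can be issued through the Admin API; a minimal sketch follows, with the caveat that this excerpt does not show whether this particular procedure originated from such a call.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ExplicitFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous flush of all regions of the table; while a region is already
          // flushing, the region server rejects the remote callable and the master retries.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
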
2024-12-06T10:17:53,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:53,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480333879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480333883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480333884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480333889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:53,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480333932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:53,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:53,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:53,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:53,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:53,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:54,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/6a32c6c29a1d403393220e0da06cbf6b 2024-12-06T10:17:54,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/7888a79dcea74802ba0695d1927ced71 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7888a79dcea74802ba0695d1927ced71 2024-12-06T10:17:54,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7888a79dcea74802ba0695d1927ced71, entries=200, sequenceid=50, filesize=14.0 K 2024-12-06T10:17:54,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/91120bcc98a244bda205ba8c69b31bb3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/91120bcc98a244bda205ba8c69b31bb3 2024-12-06T10:17:54,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/91120bcc98a244bda205ba8c69b31bb3, entries=150, sequenceid=50, 
filesize=11.7 K 2024-12-06T10:17:54,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/6a32c6c29a1d403393220e0da06cbf6b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6a32c6c29a1d403393220e0da06cbf6b 2024-12-06T10:17:54,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6a32c6c29a1d403393220e0da06cbf6b, entries=150, sequenceid=50, filesize=11.7 K 2024-12-06T10:17:54,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2fcaf53b78370d16571065501f9880b in 496ms, sequenceid=50, compaction requested=true 2024-12-06T10:17:54,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:54,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:54,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:54,052 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:54,052 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:54,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:54,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:54,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:54,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:54,053 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:54,053 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:17:54,053 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at 
candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:54,054 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:17:54,054 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:54,054 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:54,054 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2d68d28dbd6949318fca792f0e19ddca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/467650f799504aef96acc78c92d527f1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7888a79dcea74802ba0695d1927ced71] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=37.4 K 2024-12-06T10:17:54,054 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/0b25f74849814bc48784f8fdd481131b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c9a39067726d44c696cab789b22d20b7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/91120bcc98a244bda205ba8c69b31bb3] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.2 K 2024-12-06T10:17:54,054 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d68d28dbd6949318fca792f0e19ddca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480272719 2024-12-06T10:17:54,054 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b25f74849814bc48784f8fdd481131b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480272719 2024-12-06T10:17:54,054 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c9a39067726d44c696cab789b22d20b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733480272737 2024-12-06T10:17:54,055 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 467650f799504aef96acc78c92d527f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733480272737 2024-12-06T10:17:54,055 DEBUG 
[RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 91120bcc98a244bda205ba8c69b31bb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733480273429 2024-12-06T10:17:54,055 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7888a79dcea74802ba0695d1927ced71, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733480273429 2024-12-06T10:17:54,064 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#248 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:54,064 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#249 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:54,065 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/432a82c592324f94a7d2eebdc64c0219 is 50, key is test_row_0/A:col10/1733480273443/Put/seqid=0 2024-12-06T10:17:54,065 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/26e43629ebf948a6933e35eb5369c903 is 50, key is test_row_0/B:col10/1733480273443/Put/seqid=0 2024-12-06T10:17:54,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742124_1300 (size=12104) 2024-12-06T10:17:54,110 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/432a82c592324f94a7d2eebdc64c0219 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/432a82c592324f94a7d2eebdc64c0219 2024-12-06T10:17:54,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T10:17:54,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:54,112 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:17:54,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:54,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:54,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:54,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:54,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:54,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:54,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742125_1301 (size=12104) 2024-12-06T10:17:54,120 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 432a82c592324f94a7d2eebdc64c0219(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
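Editor's note, not part of the captured log: the "CompactingMemStore ... FLUSHING TO DISK" and "Swapping pipeline suffix" entries above come from HBase's in-memory compaction path, which this test table evidently has enabled for stores A, B and C. Purely as an illustrative sketch (not taken from the test source), a column family with an in-memory-compacting memstore can be declared through the public client API roughly as follows; the connection setup and the BASIC policy choice are assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateCompactingMemstoreTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One family per store seen in the log (A/B/C); BASIC in-memory compaction
      // keeps a pipeline of immutable segments until the store flushes to disk.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] { "A", "B", "C" }) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}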
2024-12-06T10:17:54,120 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:54,121 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480274052; duration=0sec 2024-12-06T10:17:54,121 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:54,121 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:17:54,121 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:54,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/cd049b8bd4094a66a8d1c2d57253689a is 50, key is test_row_0/A:col10/1733480273621/Put/seqid=0 2024-12-06T10:17:54,125 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:54,125 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:17:54,125 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:54,126 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/04d5bcc116024343a214486e6cd1ab35, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/818d811852df485ea360cb1a7a946468, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6a32c6c29a1d403393220e0da06cbf6b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.2 K 2024-12-06T10:17:54,126 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04d5bcc116024343a214486e6cd1ab35, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480272719 2024-12-06T10:17:54,126 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 818d811852df485ea360cb1a7a946468, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733480272737 2024-12-06T10:17:54,126 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/26e43629ebf948a6933e35eb5369c903 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/26e43629ebf948a6933e35eb5369c903 2024-12-06T10:17:54,127 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a32c6c29a1d403393220e0da06cbf6b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733480273429 2024-12-06T10:17:54,132 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into 26e43629ebf948a6933e35eb5369c903(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
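Editor's note, not part of the captured log: the SortedCompactionPolicy / ExploringCompactionPolicy entries here ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are driven by the store-file count thresholds. The sketch below only illustrates the usual knobs, with values matching what this log implies (3 files to make a minor compaction eligible, 16 files before writers are delayed); the key names should be checked against the HBase version actually in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of eligible store files for a minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to a store are delayed once it accumulates this many files
    // (the "16 blocking" figure reported by SortedCompactionPolicy above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min     = " + conf.getInt("hbase.hstore.compaction.min", -1));
    System.out.println("compaction.max     = " + conf.getInt("hbase.hstore.compaction.max", -1));
    System.out.println("blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}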
2024-12-06T10:17:54,132 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:54,132 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480274052; duration=0sec 2024-12-06T10:17:54,132 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:54,132 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:17:54,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742126_1302 (size=12001) 2024-12-06T10:17:54,142 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#251 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:54,142 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b44832bc3bcb4bf4ab827c5f0680e417 is 50, key is test_row_0/C:col10/1733480273443/Put/seqid=0 2024-12-06T10:17:54,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742127_1303 (size=12104) 2024-12-06T10:17:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:54,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:54,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480334258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480334360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,535 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/cd049b8bd4094a66a8d1c2d57253689a 2024-12-06T10:17:54,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3d7dd87f16d64d648a9111481768cde6 is 50, key is test_row_0/B:col10/1733480273621/Put/seqid=0 2024-12-06T10:17:54,552 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b44832bc3bcb4bf4ab827c5f0680e417 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b44832bc3bcb4bf4ab827c5f0680e417 2024-12-06T10:17:54,557 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into b44832bc3bcb4bf4ab827c5f0680e417(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
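Editor's note, not part of the captured log: the RegionTooBusyException entries above are write back-pressure. The region's memstore is over its blocking limit while the flush in pid=76 is still running, so incoming mutations are rejected with a retryable IOException. The stock HBase client already retries these internally (hbase.client.retries.number / hbase.client.pause); the snippet below is only a hedged sketch of what an explicit outer retry with backoff could look like for a single put, with made-up table, family and row names.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // RegionTooBusyException is an IOException; once the client's own
          // retries are exhausted it may also arrive wrapped in a
          // RetriesExhaustedException, so both are treated the same way here.
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}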
2024-12-06T10:17:54,558 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:54,558 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480274053; duration=0sec 2024-12-06T10:17:54,558 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:54,558 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:17:54,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480334564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742128_1304 (size=12001) 2024-12-06T10:17:54,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:54,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480334867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480334888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480334889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480334890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:54,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480334898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:54,966 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3d7dd87f16d64d648a9111481768cde6 2024-12-06T10:17:54,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/da2c34349ff34d38beb0dafdd07f2b85 is 50, key is test_row_0/C:col10/1733480273621/Put/seqid=0 2024-12-06T10:17:55,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742129_1305 (size=12001) 2024-12-06T10:17:55,025 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/da2c34349ff34d38beb0dafdd07f2b85 2024-12-06T10:17:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/cd049b8bd4094a66a8d1c2d57253689a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/cd049b8bd4094a66a8d1c2d57253689a 2024-12-06T10:17:55,038 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/cd049b8bd4094a66a8d1c2d57253689a, entries=150, sequenceid=75, filesize=11.7 K 2024-12-06T10:17:55,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3d7dd87f16d64d648a9111481768cde6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3d7dd87f16d64d648a9111481768cde6 2024-12-06T10:17:55,043 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3d7dd87f16d64d648a9111481768cde6, entries=150, sequenceid=75, filesize=11.7 K 2024-12-06T10:17:55,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/da2c34349ff34d38beb0dafdd07f2b85 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/da2c34349ff34d38beb0dafdd07f2b85 2024-12-06T10:17:55,055 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/da2c34349ff34d38beb0dafdd07f2b85, entries=150, sequenceid=75, filesize=11.7 K 2024-12-06T10:17:55,056 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2fcaf53b78370d16571065501f9880b in 944ms, sequenceid=75, compaction requested=false 2024-12-06T10:17:55,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:55,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
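Editor's note, not part of the captured log: pid=75/76 in the surrounding lines are the master's FlushTableProcedure and its per-region FlushRegionProcedure child, which drove the flush that just completed. For orientation only, the same kind of table flush can be requested through the public Admin API; a minimal sketch, where only the table name is taken from this log and the rest is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on branches where
      // flush is procedure-based (as it clearly is in this log) this runs as a
      // FlushTableProcedure with one per-region subprocedure, like pid=75/76 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}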
2024-12-06T10:17:55,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-06T10:17:55,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-06T10:17:55,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-06T10:17:55,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3480 sec 2024-12-06T10:17:55,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.3570 sec 2024-12-06T10:17:55,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:55,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:17:55,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:55,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:55,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:55,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:55,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:55,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:55,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c is 50, key is test_row_0/A:col10/1733480274257/Put/seqid=0 2024-12-06T10:17:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742130_1306 (size=14341) 2024-12-06T10:17:55,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c 2024-12-06T10:17:55,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/18ecfc49c81c40ca96e419db0363d45d is 50, key is test_row_0/B:col10/1733480274257/Put/seqid=0 2024-12-06T10:17:55,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742131_1307 
(size=12001) 2024-12-06T10:17:55,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/18ecfc49c81c40ca96e419db0363d45d 2024-12-06T10:17:55,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/8e169e7db6724c1caed2c645d64d167d is 50, key is test_row_0/C:col10/1733480274257/Put/seqid=0 2024-12-06T10:17:55,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742132_1308 (size=12001) 2024-12-06T10:17:55,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/8e169e7db6724c1caed2c645d64d167d 2024-12-06T10:17:55,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c 2024-12-06T10:17:55,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c, entries=200, sequenceid=90, filesize=14.0 K 2024-12-06T10:17:55,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/18ecfc49c81c40ca96e419db0363d45d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/18ecfc49c81c40ca96e419db0363d45d 2024-12-06T10:17:55,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/18ecfc49c81c40ca96e419db0363d45d, entries=150, sequenceid=90, filesize=11.7 K 2024-12-06T10:17:55,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/8e169e7db6724c1caed2c645d64d167d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/8e169e7db6724c1caed2c645d64d167d 2024-12-06T10:17:55,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/8e169e7db6724c1caed2c645d64d167d, entries=150, sequenceid=90, filesize=11.7 K 2024-12-06T10:17:55,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2fcaf53b78370d16571065501f9880b in 92ms, sequenceid=90, compaction requested=true 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:55,481 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:55,481 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:55,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:55,482 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:55,482 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:55,482 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:17:55,482 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:17:55,482 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:55,482 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:55,482 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/432a82c592324f94a7d2eebdc64c0219, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/cd049b8bd4094a66a8d1c2d57253689a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=37.5 K 2024-12-06T10:17:55,482 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/26e43629ebf948a6933e35eb5369c903, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3d7dd87f16d64d648a9111481768cde6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/18ecfc49c81c40ca96e419db0363d45d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.3 K 2024-12-06T10:17:55,483 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 26e43629ebf948a6933e35eb5369c903, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733480273429 2024-12-06T10:17:55,483 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 432a82c592324f94a7d2eebdc64c0219, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733480273429 2024-12-06T10:17:55,483 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d7dd87f16d64d648a9111481768cde6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733480273600 2024-12-06T10:17:55,483 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd049b8bd4094a66a8d1c2d57253689a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733480273600 2024-12-06T10:17:55,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:55,484 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3b2e21d0a2d4c6ca14dcfa29dd7a93c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480274241 2024-12-06T10:17:55,484 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 18ecfc49c81c40ca96e419db0363d45d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480274252 2024-12-06T10:17:55,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:17:55,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:55,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:55,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:55,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:55,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:55,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:55,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/dfd6cedd381945bcae195365d803c1e3 is 50, key is test_row_0/A:col10/1733480275470/Put/seqid=0 2024-12-06T10:17:55,510 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:55,511 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/af9cf15d7d254c7b9f9921975582767a is 50, key is test_row_0/B:col10/1733480274257/Put/seqid=0 2024-12-06T10:17:55,513 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#259 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:55,514 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/44cd6c6e89f64563828b045fcd9425cf is 50, key is test_row_0/A:col10/1733480274257/Put/seqid=0 2024-12-06T10:17:55,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:55,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480335515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742133_1309 (size=12001) 2024-12-06T10:17:55,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/dfd6cedd381945bcae195365d803c1e3 2024-12-06T10:17:55,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742135_1311 (size=12207) 2024-12-06T10:17:55,542 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/af9cf15d7d254c7b9f9921975582767a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/af9cf15d7d254c7b9f9921975582767a 2024-12-06T10:17:55,547 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into af9cf15d7d254c7b9f9921975582767a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:55,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:55,547 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480275481; duration=0sec 2024-12-06T10:17:55,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:55,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:17:55,547 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:55,548 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:55,548 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:17:55,549 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:55,549 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b44832bc3bcb4bf4ab827c5f0680e417, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/da2c34349ff34d38beb0dafdd07f2b85, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/8e169e7db6724c1caed2c645d64d167d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.3 K 2024-12-06T10:17:55,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742134_1310 (size=12207) 2024-12-06T10:17:55,550 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b44832bc3bcb4bf4ab827c5f0680e417, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733480273429 2024-12-06T10:17:55,550 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting da2c34349ff34d38beb0dafdd07f2b85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733480273600 2024-12-06T10:17:55,551 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e169e7db6724c1caed2c645d64d167d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=90, earliestPutTs=1733480274252 2024-12-06T10:17:55,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/5a7b4e42b4624c80bdf6acb82a09a704 is 50, key is test_row_0/B:col10/1733480275470/Put/seqid=0 2024-12-06T10:17:55,555 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/44cd6c6e89f64563828b045fcd9425cf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/44cd6c6e89f64563828b045fcd9425cf 2024-12-06T10:17:55,560 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 44cd6c6e89f64563828b045fcd9425cf(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:55,560 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:55,560 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480275481; duration=0sec 2024-12-06T10:17:55,560 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:55,560 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:17:55,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742136_1312 (size=12001) 2024-12-06T10:17:55,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/5a7b4e42b4624c80bdf6acb82a09a704 2024-12-06T10:17:55,569 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:55,570 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/3051800afebd48828f0fe8ecbd15d8f5 is 50, key is test_row_0/C:col10/1733480274257/Put/seqid=0 2024-12-06T10:17:55,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/684f37f50206416d8695f8fad7b244a1 is 50, key is test_row_0/C:col10/1733480275470/Put/seqid=0 2024-12-06T10:17:55,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742137_1313 (size=12207) 2024-12-06T10:17:55,598 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/3051800afebd48828f0fe8ecbd15d8f5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3051800afebd48828f0fe8ecbd15d8f5 2024-12-06T10:17:55,604 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 3051800afebd48828f0fe8ecbd15d8f5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:55,604 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:55,604 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480275481; duration=0sec 2024-12-06T10:17:55,604 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:55,604 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:17:55,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742138_1314 (size=12001) 2024-12-06T10:17:55,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480335617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:55,769 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T10:17:55,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480335819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/684f37f50206416d8695f8fad7b244a1 2024-12-06T10:17:56,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/dfd6cedd381945bcae195365d803c1e3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/dfd6cedd381945bcae195365d803c1e3 2024-12-06T10:17:56,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/dfd6cedd381945bcae195365d803c1e3, entries=150, sequenceid=115, filesize=11.7 K 2024-12-06T10:17:56,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/5a7b4e42b4624c80bdf6acb82a09a704 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5a7b4e42b4624c80bdf6acb82a09a704 2024-12-06T10:17:56,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5a7b4e42b4624c80bdf6acb82a09a704, entries=150, sequenceid=115, filesize=11.7 K 2024-12-06T10:17:56,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/684f37f50206416d8695f8fad7b244a1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/684f37f50206416d8695f8fad7b244a1 2024-12-06T10:17:56,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/684f37f50206416d8695f8fad7b244a1, entries=150, sequenceid=115, filesize=11.7 K 2024-12-06T10:17:56,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2fcaf53b78370d16571065501f9880b in 543ms, sequenceid=115, compaction requested=false 2024-12-06T10:17:56,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:56,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:56,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:17:56,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:56,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:56,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:56,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:56,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:56,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:56,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/9cee9cb6e20547d1bd5886525467e9bb is 50, key is test_row_0/A:col10/1733480275514/Put/seqid=0 2024-12-06T10:17:56,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742139_1315 (size=12051) 2024-12-06T10:17:56,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/9cee9cb6e20547d1bd5886525467e9bb 2024-12-06T10:17:56,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/e95257d7b5ed4d52a6a1f6ebacfd2c87 is 50, key is test_row_0/B:col10/1733480275514/Put/seqid=0 2024-12-06T10:17:56,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742140_1316 (size=12051) 2024-12-06T10:17:56,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/e95257d7b5ed4d52a6a1f6ebacfd2c87 2024-12-06T10:17:56,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/7370127546684b45a4b2cfa0f185ceaf is 50, key is test_row_0/C:col10/1733480275514/Put/seqid=0 2024-12-06T10:17:56,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742141_1317 (size=12051) 2024-12-06T10:17:56,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480336191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480336294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480336497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/7370127546684b45a4b2cfa0f185ceaf 2024-12-06T10:17:56,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/9cee9cb6e20547d1bd5886525467e9bb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9cee9cb6e20547d1bd5886525467e9bb 2024-12-06T10:17:56,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9cee9cb6e20547d1bd5886525467e9bb, entries=150, sequenceid=130, filesize=11.8 K 2024-12-06T10:17:56,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/e95257d7b5ed4d52a6a1f6ebacfd2c87 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/e95257d7b5ed4d52a6a1f6ebacfd2c87 2024-12-06T10:17:56,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/e95257d7b5ed4d52a6a1f6ebacfd2c87, entries=150, sequenceid=130, filesize=11.8 K 2024-12-06T10:17:56,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/7370127546684b45a4b2cfa0f185ceaf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/7370127546684b45a4b2cfa0f185ceaf 2024-12-06T10:17:56,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/7370127546684b45a4b2cfa0f185ceaf, entries=150, sequenceid=130, filesize=11.8 K 2024-12-06T10:17:56,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2fcaf53b78370d16571065501f9880b in 479ms, sequenceid=130, compaction requested=true 2024-12-06T10:17:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:56,606 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:56,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:56,608 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:56,608 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:17:56,608 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:56,608 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/44cd6c6e89f64563828b045fcd9425cf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/dfd6cedd381945bcae195365d803c1e3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9cee9cb6e20547d1bd5886525467e9bb] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.4 K 2024-12-06T10:17:56,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:56,608 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:56,609 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44cd6c6e89f64563828b045fcd9425cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480274252 2024-12-06T10:17:56,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:56,609 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 
{}] compactions.Compactor(224): Compacting dfd6cedd381945bcae195365d803c1e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733480275470 2024-12-06T10:17:56,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:56,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:56,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:17:56,610 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:56,610 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/af9cf15d7d254c7b9f9921975582767a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5a7b4e42b4624c80bdf6acb82a09a704, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/e95257d7b5ed4d52a6a1f6ebacfd2c87] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.4 K 2024-12-06T10:17:56,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting af9cf15d7d254c7b9f9921975582767a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480274252 2024-12-06T10:17:56,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cee9cb6e20547d1bd5886525467e9bb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733480275487 2024-12-06T10:17:56,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a7b4e42b4624c80bdf6acb82a09a704, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733480275470 2024-12-06T10:17:56,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e95257d7b5ed4d52a6a1f6ebacfd2c87, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733480275487 2024-12-06T10:17:56,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:56,630 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#266 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:56,631 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/331f12ea77284796bb3106fbea9bc829 is 50, key is test_row_0/A:col10/1733480275514/Put/seqid=0 2024-12-06T10:17:56,634 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#267 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:56,634 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/aad0cf12251a4f1a883da2908a602e46 is 50, key is test_row_0/B:col10/1733480275514/Put/seqid=0 2024-12-06T10:17:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742143_1319 (size=12359) 2024-12-06T10:17:56,671 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/331f12ea77284796bb3106fbea9bc829 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/331f12ea77284796bb3106fbea9bc829 2024-12-06T10:17:56,677 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 331f12ea77284796bb3106fbea9bc829(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:56,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:56,677 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480276606; duration=0sec 2024-12-06T10:17:56,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:56,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:17:56,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:17:56,679 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:17:56,679 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:17:56,679 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:56,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742142_1318 (size=12359) 2024-12-06T10:17:56,680 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3051800afebd48828f0fe8ecbd15d8f5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/684f37f50206416d8695f8fad7b244a1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/7370127546684b45a4b2cfa0f185ceaf] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=35.4 K 2024-12-06T10:17:56,681 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3051800afebd48828f0fe8ecbd15d8f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733480274252 2024-12-06T10:17:56,681 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 684f37f50206416d8695f8fad7b244a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733480275470 2024-12-06T10:17:56,682 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7370127546684b45a4b2cfa0f185ceaf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, 
compression=NONE, seqNum=130, earliestPutTs=1733480275487 2024-12-06T10:17:56,690 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:56,690 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/e8ac6f4366f14f04985f0c14846b1dea is 50, key is test_row_0/C:col10/1733480275514/Put/seqid=0 2024-12-06T10:17:56,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742144_1320 (size=12359) 2024-12-06T10:17:56,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:56,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:17:56,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:56,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:56,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:56,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:56,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:56,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:56,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/bdb9067db6b44ebe9fa2eafde6e3c90c is 50, key is test_row_0/A:col10/1733480276187/Put/seqid=0 2024-12-06T10:17:56,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T10:17:56,814 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-06T10:17:56,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:56,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-06T10:17:56,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T10:17:56,817 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:56,818 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:56,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:56,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742145_1321 (size=12151) 2024-12-06T10:17:56,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480336850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480336894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480336894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,895 DEBUG [Thread-1315 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:56,895 DEBUG [Thread-1319 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:56,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480336897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,898 DEBUG [Thread-1317 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:56,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480336904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,905 DEBUG [Thread-1313 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:17:56,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T10:17:56,955 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480336953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:56,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-06T10:17:56,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:56,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:56,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:56,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:56,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:56,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:57,086 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/aad0cf12251a4f1a883da2908a602e46 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/aad0cf12251a4f1a883da2908a602e46 2024-12-06T10:17:57,096 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into aad0cf12251a4f1a883da2908a602e46(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:57,096 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:57,096 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480276608; duration=0sec 2024-12-06T10:17:57,096 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:57,096 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:17:57,116 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/e8ac6f4366f14f04985f0c14846b1dea as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e8ac6f4366f14f04985f0c14846b1dea 2024-12-06T10:17:57,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T10:17:57,121 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into e8ac6f4366f14f04985f0c14846b1dea(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:57,121 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:57,121 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480276610; duration=0sec 2024-12-06T10:17:57,121 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:57,121 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:17:57,127 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-06T10:17:57,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:57,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:57,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:57,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:57,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:57,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:57,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:57,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480337157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/bdb9067db6b44ebe9fa2eafde6e3c90c 2024-12-06T10:17:57,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/847103391737481fbbd3bd0c748725bd is 50, key is test_row_0/B:col10/1733480276187/Put/seqid=0 2024-12-06T10:17:57,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742146_1322 (size=12151) 2024-12-06T10:17:57,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/847103391737481fbbd3bd0c748725bd 2024-12-06T10:17:57,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b09f15d14aca40798728530c024c027b is 50, key is test_row_0/C:col10/1733480276187/Put/seqid=0 2024-12-06T10:17:57,281 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-06T10:17:57,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:57,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
as already flushing 2024-12-06T10:17:57,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:57,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:57,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:57,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:57,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742147_1323 (size=12151) 2024-12-06T10:17:57,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b09f15d14aca40798728530c024c027b 2024-12-06T10:17:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/bdb9067db6b44ebe9fa2eafde6e3c90c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bdb9067db6b44ebe9fa2eafde6e3c90c 2024-12-06T10:17:57,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bdb9067db6b44ebe9fa2eafde6e3c90c, entries=150, sequenceid=156, filesize=11.9 K 2024-12-06T10:17:57,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/847103391737481fbbd3bd0c748725bd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/847103391737481fbbd3bd0c748725bd 
2024-12-06T10:17:57,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/847103391737481fbbd3bd0c748725bd, entries=150, sequenceid=156, filesize=11.9 K 2024-12-06T10:17:57,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b09f15d14aca40798728530c024c027b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b09f15d14aca40798728530c024c027b 2024-12-06T10:17:57,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b09f15d14aca40798728530c024c027b, entries=150, sequenceid=156, filesize=11.9 K 2024-12-06T10:17:57,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2fcaf53b78370d16571065501f9880b in 511ms, sequenceid=156, compaction requested=false 2024-12-06T10:17:57,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:57,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T10:17:57,437 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:57,438 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:57,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:57,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8035ae33d05d4fd68a60dedcafec4c7a is 50, key is test_row_0/A:col10/1733480276849/Put/seqid=0 2024-12-06T10:17:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
as already flushing 2024-12-06T10:17:57,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742148_1324 (size=12151) 2024-12-06T10:17:57,468 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8035ae33d05d4fd68a60dedcafec4c7a 2024-12-06T10:17:57,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/04b390015ada4d3693b5162993a09ada is 50, key is test_row_0/B:col10/1733480276849/Put/seqid=0 2024-12-06T10:17:57,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742149_1325 (size=12151) 2024-12-06T10:17:57,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480337529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480337630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:57,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480337835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:57,884 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/04b390015ada4d3693b5162993a09ada 2024-12-06T10:17:57,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/37e1a8ecf9de4777a3dc449120a3c8f4 is 50, key is test_row_0/C:col10/1733480276849/Put/seqid=0 2024-12-06T10:17:57,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T10:17:57,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742150_1326 (size=12151) 2024-12-06T10:17:58,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:58,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480338140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:58,334 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/37e1a8ecf9de4777a3dc449120a3c8f4 2024-12-06T10:17:58,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8035ae33d05d4fd68a60dedcafec4c7a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8035ae33d05d4fd68a60dedcafec4c7a 2024-12-06T10:17:58,342 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8035ae33d05d4fd68a60dedcafec4c7a, entries=150, sequenceid=169, filesize=11.9 K 2024-12-06T10:17:58,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/04b390015ada4d3693b5162993a09ada as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/04b390015ada4d3693b5162993a09ada 2024-12-06T10:17:58,347 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/04b390015ada4d3693b5162993a09ada, entries=150, sequenceid=169, filesize=11.9 K 2024-12-06T10:17:58,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/37e1a8ecf9de4777a3dc449120a3c8f4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/37e1a8ecf9de4777a3dc449120a3c8f4 2024-12-06T10:17:58,353 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/37e1a8ecf9de4777a3dc449120a3c8f4, entries=150, sequenceid=169, filesize=11.9 K 2024-12-06T10:17:58,354 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2fcaf53b78370d16571065501f9880b in 915ms, sequenceid=169, compaction requested=true 2024-12-06T10:17:58,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:58,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:58,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-06T10:17:58,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-06T10:17:58,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-06T10:17:58,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5370 sec 2024-12-06T10:17:58,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.5410 sec 2024-12-06T10:17:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:58,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:17:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:58,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/99a841503b9745c2941195cc9a9b4f04 is 50, key is test_row_0/A:col10/1733480277528/Put/seqid=0 2024-12-06T10:17:58,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742151_1327 (size=14541) 2024-12-06T10:17:58,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480338671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:58,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:58,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480338775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T10:17:58,922 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-06T10:17:58,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:17:58,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-06T10:17:58,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T10:17:58,925 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:17:58,925 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:17:58,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:17:58,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480338978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T10:17:59,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/99a841503b9745c2941195cc9a9b4f04 2024-12-06T10:17:59,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T10:17:59,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:59,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/a5aa60f0a5074786b5117c290b9c10b7 is 50, key is test_row_0/B:col10/1733480277528/Put/seqid=0 2024-12-06T10:17:59,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742152_1328 (size=12151) 2024-12-06T10:17:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T10:17:59,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T10:17:59,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:59,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:59,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:59,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480339281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,382 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T10:17:59,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:59,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:17:59,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/a5aa60f0a5074786b5117c290b9c10b7 2024-12-06T10:17:59,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cf48dccda030418ca9ea5b03d7f9fd26 is 50, key is test_row_0/C:col10/1733480277528/Put/seqid=0 2024-12-06T10:17:59,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742153_1329 (size=12151) 2024-12-06T10:17:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T10:17:59,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cf48dccda030418ca9ea5b03d7f9fd26 2024-12-06T10:17:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/99a841503b9745c2941195cc9a9b4f04 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/99a841503b9745c2941195cc9a9b4f04 2024-12-06T10:17:59,535 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T10:17:59,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:17:59,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:59,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:17:59,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/99a841503b9745c2941195cc9a9b4f04, entries=200, sequenceid=195, filesize=14.2 K 2024-12-06T10:17:59,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/a5aa60f0a5074786b5117c290b9c10b7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/a5aa60f0a5074786b5117c290b9c10b7 2024-12-06T10:17:59,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/a5aa60f0a5074786b5117c290b9c10b7, entries=150, sequenceid=195, filesize=11.9 K 2024-12-06T10:17:59,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cf48dccda030418ca9ea5b03d7f9fd26 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cf48dccda030418ca9ea5b03d7f9fd26 2024-12-06T10:17:59,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cf48dccda030418ca9ea5b03d7f9fd26, entries=150, sequenceid=195, filesize=11.9 K 
2024-12-06T10:17:59,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2fcaf53b78370d16571065501f9880b in 902ms, sequenceid=195, compaction requested=true 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:17:59,551 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:17:59,551 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:59,553 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51202 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:59,553 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:17:59,553 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:59,553 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/331f12ea77284796bb3106fbea9bc829, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bdb9067db6b44ebe9fa2eafde6e3c90c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8035ae33d05d4fd68a60dedcafec4c7a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/99a841503b9745c2941195cc9a9b4f04] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=50.0 K 2024-12-06T10:17:59,553 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:59,553 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:17:59,553 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,553 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/aad0cf12251a4f1a883da2908a602e46, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/847103391737481fbbd3bd0c748725bd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/04b390015ada4d3693b5162993a09ada, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/a5aa60f0a5074786b5117c290b9c10b7] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=47.7 K 2024-12-06T10:17:59,554 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting aad0cf12251a4f1a883da2908a602e46, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733480275487 2024-12-06T10:17:59,554 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 331f12ea77284796bb3106fbea9bc829, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733480275487 2024-12-06T10:17:59,555 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 847103391737481fbbd3bd0c748725bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, 
earliestPutTs=1733480276187 2024-12-06T10:17:59,555 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdb9067db6b44ebe9fa2eafde6e3c90c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733480276187 2024-12-06T10:17:59,555 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 04b390015ada4d3693b5162993a09ada, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733480276825 2024-12-06T10:17:59,555 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8035ae33d05d4fd68a60dedcafec4c7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733480276825 2024-12-06T10:17:59,556 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a5aa60f0a5074786b5117c290b9c10b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733480277521 2024-12-06T10:17:59,556 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99a841503b9745c2941195cc9a9b4f04, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733480277521 2024-12-06T10:17:59,570 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#278 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:59,570 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#279 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:59,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3b8ddd5defd947e7a183562979589006 is 50, key is test_row_0/B:col10/1733480277528/Put/seqid=0 2024-12-06T10:17:59,571 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/34575bd6e68d4110b6db3609b95b9a4d is 50, key is test_row_0/A:col10/1733480277528/Put/seqid=0 2024-12-06T10:17:59,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742154_1330 (size=12595) 2024-12-06T10:17:59,601 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3b8ddd5defd947e7a183562979589006 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3b8ddd5defd947e7a183562979589006 2024-12-06T10:17:59,608 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into 3b8ddd5defd947e7a183562979589006(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:59,608 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:59,608 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=12, startTime=1733480279551; duration=0sec 2024-12-06T10:17:59,608 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:17:59,608 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:17:59,609 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:17:59,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:17:59,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:17:59,610 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,610 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e8ac6f4366f14f04985f0c14846b1dea, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b09f15d14aca40798728530c024c027b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/37e1a8ecf9de4777a3dc449120a3c8f4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cf48dccda030418ca9ea5b03d7f9fd26] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=47.7 K 2024-12-06T10:17:59,611 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e8ac6f4366f14f04985f0c14846b1dea, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733480275487 2024-12-06T10:17:59,611 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b09f15d14aca40798728530c024c027b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733480276187 2024-12-06T10:17:59,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 37e1a8ecf9de4777a3dc449120a3c8f4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=169, earliestPutTs=1733480276825 2024-12-06T10:17:59,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cf48dccda030418ca9ea5b03d7f9fd26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733480277521 2024-12-06T10:17:59,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742155_1331 (size=12595) 2024-12-06T10:17:59,625 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/34575bd6e68d4110b6db3609b95b9a4d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/34575bd6e68d4110b6db3609b95b9a4d 2024-12-06T10:17:59,629 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:17:59,630 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/6199bad2dd7848b38177793044a7012a is 50, key is test_row_0/C:col10/1733480277528/Put/seqid=0 2024-12-06T10:17:59,635 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 34575bd6e68d4110b6db3609b95b9a4d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:17:59,635 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:59,635 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=12, startTime=1733480279551; duration=0sec 2024-12-06T10:17:59,635 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:59,635 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:17:59,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742156_1332 (size=12595) 2024-12-06T10:17:59,653 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/6199bad2dd7848b38177793044a7012a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6199bad2dd7848b38177793044a7012a 2024-12-06T10:17:59,659 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 6199bad2dd7848b38177793044a7012a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:17:59,659 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:59,659 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=12, startTime=1733480279551; duration=0sec 2024-12-06T10:17:59,659 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:17:59,659 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:17:59,689 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T10:17:59,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:17:59,689 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-06T10:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:59,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/7147eae646c141df84461f420ee19fe3 is 50, key is test_row_0/A:col10/1733480278670/Put/seqid=0 2024-12-06T10:17:59,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742157_1333 (size=12151) 2024-12-06T10:17:59,708 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/7147eae646c141df84461f420ee19fe3 2024-12-06T10:17:59,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/187af28b04b8491d893ad3590ef1c49c is 50, key is test_row_0/B:col10/1733480278670/Put/seqid=0 2024-12-06T10:17:59,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742158_1334 (size=12151) 2024-12-06T10:17:59,721 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/187af28b04b8491d893ad3590ef1c49c 2024-12-06T10:17:59,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/94ec74b5c888409b9c9e97280f18e6a6 is 50, key is test_row_0/C:col10/1733480278670/Put/seqid=0 2024-12-06T10:17:59,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742159_1335 (size=12151) 2024-12-06T10:17:59,738 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/94ec74b5c888409b9c9e97280f18e6a6 2024-12-06T10:17:59,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/7147eae646c141df84461f420ee19fe3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7147eae646c141df84461f420ee19fe3 2024-12-06T10:17:59,747 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7147eae646c141df84461f420ee19fe3, entries=150, sequenceid=208, filesize=11.9 K 2024-12-06T10:17:59,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/187af28b04b8491d893ad3590ef1c49c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/187af28b04b8491d893ad3590ef1c49c 2024-12-06T10:17:59,752 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/187af28b04b8491d893ad3590ef1c49c, entries=150, sequenceid=208, filesize=11.9 K 2024-12-06T10:17:59,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/94ec74b5c888409b9c9e97280f18e6a6 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/94ec74b5c888409b9c9e97280f18e6a6 2024-12-06T10:17:59,758 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/94ec74b5c888409b9c9e97280f18e6a6, entries=150, sequenceid=208, filesize=11.9 K 2024-12-06T10:17:59,759 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for d2fcaf53b78370d16571065501f9880b in 70ms, sequenceid=208, compaction requested=false 2024-12-06T10:17:59,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:17:59,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:17:59,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-06T10:17:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-06T10:17:59,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-06T10:17:59,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 835 msec 2024-12-06T10:17:59,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 840 msec 2024-12-06T10:17:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:17:59,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:17:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:17:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:17:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:17:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:17:59,804 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/2128398bfd284391b81a2032e9e90043 is 50, key is test_row_0/A:col10/1733480279793/Put/seqid=0 2024-12-06T10:17:59,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742160_1336 (size=9757) 2024-12-06T10:17:59,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/2128398bfd284391b81a2032e9e90043 2024-12-06T10:17:59,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/327fb56ddee142178ae19135e5a3c1f6 is 50, key is test_row_0/B:col10/1733480279793/Put/seqid=0 2024-12-06T10:17:59,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742161_1337 (size=9757) 2024-12-06T10:17:59,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/327fb56ddee142178ae19135e5a3c1f6 2024-12-06T10:17:59,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/57dba0f12fff467f8299e8bee8b693ac is 50, key is test_row_0/C:col10/1733480279793/Put/seqid=0 2024-12-06T10:17:59,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742162_1338 (size=9757) 2024-12-06T10:17:59,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480339855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:17:59,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:17:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480339958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T10:18:00,028 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-06T10:18:00,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-06T10:18:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T10:18:00,031 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:00,032 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:00,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T10:18:00,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480340160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,183 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-06T10:18:00,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:00,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:00,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:00,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:00,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:00,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/57dba0f12fff467f8299e8bee8b693ac 2024-12-06T10:18:00,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/2128398bfd284391b81a2032e9e90043 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2128398bfd284391b81a2032e9e90043 2024-12-06T10:18:00,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2128398bfd284391b81a2032e9e90043, entries=100, sequenceid=219, filesize=9.5 K 2024-12-06T10:18:00,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/327fb56ddee142178ae19135e5a3c1f6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/327fb56ddee142178ae19135e5a3c1f6 2024-12-06T10:18:00,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/327fb56ddee142178ae19135e5a3c1f6, entries=100, sequenceid=219, filesize=9.5 K 2024-12-06T10:18:00,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/57dba0f12fff467f8299e8bee8b693ac as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/57dba0f12fff467f8299e8bee8b693ac 2024-12-06T10:18:00,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/57dba0f12fff467f8299e8bee8b693ac, entries=100, sequenceid=219, filesize=9.5 K 2024-12-06T10:18:00,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2fcaf53b78370d16571065501f9880b in 476ms, sequenceid=219, compaction requested=true 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:00,275 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:00,275 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:00,277 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:00,277 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:00,277 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:00,277 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/34575bd6e68d4110b6db3609b95b9a4d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7147eae646c141df84461f420ee19fe3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2128398bfd284391b81a2032e9e90043] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=33.7 K 2024-12-06T10:18:00,277 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:00,277 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:00,277 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:00,278 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3b8ddd5defd947e7a183562979589006, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/187af28b04b8491d893ad3590ef1c49c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/327fb56ddee142178ae19135e5a3c1f6] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=33.7 K 2024-12-06T10:18:00,278 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34575bd6e68d4110b6db3609b95b9a4d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733480277521 2024-12-06T10:18:00,278 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7147eae646c141df84461f420ee19fe3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733480278656 2024-12-06T10:18:00,278 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b8ddd5defd947e7a183562979589006, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733480277521 2024-12-06T10:18:00,278 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2128398bfd284391b81a2032e9e90043, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480279793 2024-12-06T10:18:00,278 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 187af28b04b8491d893ad3590ef1c49c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733480278656 2024-12-06T10:18:00,279 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 327fb56ddee142178ae19135e5a3c1f6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480279793 2024-12-06T10:18:00,302 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:00,302 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:00,303 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3e97c830b57c4ba7ba93120998403622 is 50, key is test_row_0/B:col10/1733480279793/Put/seqid=0 2024-12-06T10:18:00,303 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/3e65179b747744dc90dcb388a7447dbc is 50, key is test_row_0/A:col10/1733480279793/Put/seqid=0 2024-12-06T10:18:00,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T10:18:00,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742163_1339 (size=12697) 2024-12-06T10:18:00,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
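The PressureAwareThroughputController lines above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") report write throttling during compaction. A minimal sketch of the idea, assuming a simple sleep-to-stay-under-the-limit control step (class and field names are invented; this is not HBase's actual controller, which also adjusts the limit with memstore/flush pressure):

    // After each chunk of compaction writes, sleep just long enough that the
    // observed write rate stays at or below the configured limit.
    public final class ThrottleSketch {
      private final double maxBytesPerSec;
      private long bytesSinceCheck;
      private long lastCheckNanos = System.nanoTime();

      ThrottleSketch(double maxBytesPerSec) {
        this.maxBytesPerSec = maxBytesPerSec;
      }

      void control(long bytesJustWritten) throws InterruptedException {
        bytesSinceCheck += bytesJustWritten;
        double elapsedSec = (System.nanoTime() - lastCheckNanos) / 1e9;
        double neededSec = bytesSinceCheck / maxBytesPerSec;   // minimum time this chunk should have taken
        long sleepMs = (long) ((neededSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
          Thread.sleep(sleepMs);   // the "slept N time(s)" counted in the log lines above
        }
        bytesSinceCheck = 0;
        lastCheckNanos = System.nanoTime();
      }
    }

With a 50 MB/s limit and the small files being compacted here, the computed sleep never goes positive, which is consistent with "slept 0 time(s) and total slept time is 0 ms" above.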
2024-12-06T10:18:00,337 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:00,342 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/3e65179b747744dc90dcb388a7447dbc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/3e65179b747744dc90dcb388a7447dbc 2024-12-06T10:18:00,347 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 3e65179b747744dc90dcb388a7447dbc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
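The pid=82 entries above show the region server executing a FlushRegionCallable dispatched for a flush procedure. On the client side such a flush can be requested through the Admin API; a minimal sketch using the standard HBase client (whether the request is routed through a master-driven procedure, as the pid=81/82 entries suggest, depends on the HBase version in use):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Request a flush of every region of the table; the region servers then write
    // their memstores out to HFiles, as in the DefaultStoreFlusher entries above.
    public final class FlushExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }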
2024-12-06T10:18:00,347 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:00,347 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480280275; duration=0sec 2024-12-06T10:18:00,347 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:00,347 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:00,347 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:00,349 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:00,349 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:00,349 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:00,349 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6199bad2dd7848b38177793044a7012a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/94ec74b5c888409b9c9e97280f18e6a6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/57dba0f12fff467f8299e8bee8b693ac] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=33.7 K 2024-12-06T10:18:00,350 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6199bad2dd7848b38177793044a7012a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733480277521 2024-12-06T10:18:00,350 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94ec74b5c888409b9c9e97280f18e6a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733480278656 2024-12-06T10:18:00,351 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57dba0f12fff467f8299e8bee8b693ac, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480279793 2024-12-06T10:18:00,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37093 is added to blk_1073742164_1340 (size=12697) 2024-12-06T10:18:00,377 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/3e97c830b57c4ba7ba93120998403622 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3e97c830b57c4ba7ba93120998403622 2024-12-06T10:18:00,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/de4b86fbb53f4682af7f0264e4661050 is 50, key is test_row_0/A:col10/1733480279851/Put/seqid=0 2024-12-06T10:18:00,378 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#290 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:00,378 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/d5364d6984d042c693510973f2847583 is 50, key is test_row_0/C:col10/1733480279793/Put/seqid=0 2024-12-06T10:18:00,382 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into 3e97c830b57c4ba7ba93120998403622(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
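The RegionTooBusyException entries that follow ("Over memstore limit=512.0 K", client retry loops with "retries=16") are backpressure: once a region's memstore exceeds the flush size times the block multiplier, writes are rejected until a flush catches up, and the client retries with backoff. A sketch of the configuration keys involved, with illustrative values only (this test run presumably uses a very small flush size to reach a 512 K blocking limit; these are not the values taken from its configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class BackpressureConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // blocking limit = flush size * block multiplier; 128 K * 4 = 512 K would match the log
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // client-side retry behaviour seen below ("tries=7, retries=16")
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);   // base backoff between retries, in ms
      }
    }

With settings like these the client keeps retrying with increasing pauses before surfacing the exception, which is what the repeated RpcRetryingCallerImpl "Call exception, tries=7, retries=16, started=... ms ago" entries below record.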
2024-12-06T10:18:00,382 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:00,382 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480280275; duration=0sec 2024-12-06T10:18:00,382 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:00,382 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:00,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742165_1341 (size=12151) 2024-12-06T10:18:00,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742166_1342 (size=12697) 2024-12-06T10:18:00,452 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/d5364d6984d042c693510973f2847583 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d5364d6984d042c693510973f2847583 2024-12-06T10:18:00,460 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into d5364d6984d042c693510973f2847583(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:00,460 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:00,461 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480280275; duration=0sec 2024-12-06T10:18:00,461 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:00,461 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:00,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:00,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480340485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480340588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T10:18:00,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480340791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,811 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/de4b86fbb53f4682af7f0264e4661050 2024-12-06T10:18:00,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4fe7587ef4314f42af2af78903edd40c is 50, key is test_row_0/B:col10/1733480279851/Put/seqid=0 2024-12-06T10:18:00,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742167_1343 (size=12151) 2024-12-06T10:18:00,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480340904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,906 DEBUG [Thread-1317 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:00,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480340914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,916 DEBUG [Thread-1315 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:00,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480340924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480340925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:00,927 DEBUG [Thread-1319 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:00,927 DEBUG [Thread-1313 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:01,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480341094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T10:18:01,222 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4fe7587ef4314f42af2af78903edd40c 2024-12-06T10:18:01,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/a9ffef46b15745dc9f224adfa976051d is 50, key is test_row_0/C:col10/1733480279851/Put/seqid=0 2024-12-06T10:18:01,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742168_1344 (size=12151) 2024-12-06T10:18:01,242 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/a9ffef46b15745dc9f224adfa976051d 2024-12-06T10:18:01,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/de4b86fbb53f4682af7f0264e4661050 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/de4b86fbb53f4682af7f0264e4661050 2024-12-06T10:18:01,253 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/de4b86fbb53f4682af7f0264e4661050, entries=150, sequenceid=244, filesize=11.9 K 2024-12-06T10:18:01,254 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4fe7587ef4314f42af2af78903edd40c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4fe7587ef4314f42af2af78903edd40c 2024-12-06T10:18:01,259 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4fe7587ef4314f42af2af78903edd40c, entries=150, sequenceid=244, filesize=11.9 K 2024-12-06T10:18:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/a9ffef46b15745dc9f224adfa976051d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/a9ffef46b15745dc9f224adfa976051d 2024-12-06T10:18:01,265 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/a9ffef46b15745dc9f224adfa976051d, entries=150, sequenceid=244, filesize=11.9 K 2024-12-06T10:18:01,266 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2fcaf53b78370d16571065501f9880b in 929ms, sequenceid=244, compaction requested=false 2024-12-06T10:18:01,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:01,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
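[editor's note] The RpcRetryingCallerImpl DEBUG entry earlier in this run ("tries=7, retries=16, started=8163 ms ago") is the client-side retry loop absorbing the RegionTooBusyException thrown by the region server. The writer thread in AcidGuaranteesTestTool is ultimately doing ordinary Table.put calls; below is a minimal sketch of what such a caller looks like with the standard HBase 2.x client API. The row, family and qualifier names are taken from the log; the retry/backoff values are illustrative assumptions, not values read from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs (illustrative values, not from the log).
    conf.setInt("hbase.client.retries.number", 15); // total attempts; log shows retries=16
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms between attempts
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Table.put goes through RpcRetryingCallerImpl, which retries transient
      // server responses such as RegionTooBusyException with backoff until the
      // retry budget is exhausted.
      table.put(put);
    }
  }
}
```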
2024-12-06T10:18:01,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-06T10:18:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-06T10:18:01,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-06T10:18:01,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2360 sec 2024-12-06T10:18:01,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.2400 sec 2024-12-06T10:18:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:01,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:18:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:01,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/81a993ae8ff6493d990e41ceaf280351 is 50, key is test_row_0/A:col10/1733480280482/Put/seqid=0 2024-12-06T10:18:01,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742169_1345 (size=14641) 2024-12-06T10:18:01,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/81a993ae8ff6493d990e41ceaf280351 2024-12-06T10:18:01,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/8d70e459b4cd4fa0811fb839ca73eceb is 50, key is test_row_0/B:col10/1733480280482/Put/seqid=0 2024-12-06T10:18:01,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742170_1346 
(size=12201) 2024-12-06T10:18:01,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480341659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:01,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:01,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480341762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:01,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:01,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480341966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/8d70e459b4cd4fa0811fb839ca73eceb 2024-12-06T10:18:02,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/3956c661bff84474b73ce38db2ac27d8 is 50, key is test_row_0/C:col10/1733480280482/Put/seqid=0 2024-12-06T10:18:02,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742171_1347 (size=12201) 2024-12-06T10:18:02,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T10:18:02,137 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-06T10:18:02,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:02,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-06T10:18:02,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T10:18:02,140 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:02,141 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:02,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
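[editor's note] The entries above show the client (Thread-1321, via HBaseAdmin$TableFuture) completing the flush with procId 81 and immediately requesting another, which the master stores as FlushTableProcedure pid=83 with a FlushRegionProcedure subprocedure pid=84. A minimal sketch of issuing that kind of flush through the public Admin API follows, assuming the standard HBase client; the synchronous flush call waits on the master-side procedure, which is consistent with the "procId: 81 completed" line.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; the master runs it as a
      // FlushTableProcedure with per-region FlushRegionProcedure children.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```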
2024-12-06T10:18:02,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T10:18:02,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:02,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480342272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,292 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T10:18:02,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:02,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:02,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
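[editor's note] The recurring "Over memstore limit=512.0 K" warnings above come from HRegion.checkResources(), which rejects writes once a region's memstore exceeds its blocking size, i.e. the flush size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows one configuration that would yield the 512 K limit seen here, assuming a 128 K flush size and the default multiplier of 4; the exact values this test tool uses are an assumption, not something stated in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical settings producing a 512 K blocking limit:
// blocking size = flush size * block multiplier = 128 K * 4 = 512 K.
public class SmallMemstoreConfigSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumption: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // HBase default
    return conf;
  }
}
```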
2024-12-06T10:18:02,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:02,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:02,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:02,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/3956c661bff84474b73ce38db2ac27d8 2024-12-06T10:18:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T10:18:02,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/81a993ae8ff6493d990e41ceaf280351 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/81a993ae8ff6493d990e41ceaf280351 2024-12-06T10:18:02,445 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T10:18:02,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:02,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
as already flushing 2024-12-06T10:18:02,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:02,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:02,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:02,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:02,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/81a993ae8ff6493d990e41ceaf280351, entries=200, sequenceid=259, filesize=14.3 K 2024-12-06T10:18:02,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/8d70e459b4cd4fa0811fb839ca73eceb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8d70e459b4cd4fa0811fb839ca73eceb 2024-12-06T10:18:02,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8d70e459b4cd4fa0811fb839ca73eceb, entries=150, sequenceid=259, filesize=11.9 K 2024-12-06T10:18:02,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/3956c661bff84474b73ce38db2ac27d8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3956c661bff84474b73ce38db2ac27d8 2024-12-06T10:18:02,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3956c661bff84474b73ce38db2ac27d8, entries=150, sequenceid=259, filesize=11.9 K 2024-12-06T10:18:02,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2fcaf53b78370d16571065501f9880b in 871ms, sequenceid=259, compaction requested=true 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:02,474 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:02,474 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:02,475 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:02,476 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:02,476 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:02,476 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39489 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:02,476 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3e97c830b57c4ba7ba93120998403622, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4fe7587ef4314f42af2af78903edd40c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8d70e459b4cd4fa0811fb839ca73eceb] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.2 K 2024-12-06T10:18:02,476 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:02,476 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:02,476 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/3e65179b747744dc90dcb388a7447dbc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/de4b86fbb53f4682af7f0264e4661050, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/81a993ae8ff6493d990e41ceaf280351] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=38.6 K 2024-12-06T10:18:02,476 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e97c830b57c4ba7ba93120998403622, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480278656 2024-12-06T10:18:02,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e65179b747744dc90dcb388a7447dbc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480278656 2024-12-06T10:18:02,477 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fe7587ef4314f42af2af78903edd40c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733480279849 2024-12-06T10:18:02,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting de4b86fbb53f4682af7f0264e4661050, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733480279849 2024-12-06T10:18:02,477 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 8d70e459b4cd4fa0811fb839ca73eceb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733480280477 2024-12-06T10:18:02,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81a993ae8ff6493d990e41ceaf280351, keycount=200, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733480280477 2024-12-06T10:18:02,485 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#296 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:02,485 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/5424735984eb44cb90cd6ea439e0c6a2 is 50, key is test_row_0/B:col10/1733480280482/Put/seqid=0 2024-12-06T10:18:02,488 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#297 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:02,488 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8ce1a53d35d241bfb2066fa64ad2aca2 is 50, key is test_row_0/A:col10/1733480280482/Put/seqid=0 2024-12-06T10:18:02,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742172_1348 (size=12849) 2024-12-06T10:18:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742173_1349 (size=12849) 2024-12-06T10:18:02,598 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T10:18:02,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:02,599 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:18:02,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:02,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:02,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:02,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:02,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:02,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:02,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/57a2481bb78a4472a5b317cdd61b95aa is 50, key is test_row_0/A:col10/1733480281648/Put/seqid=0 2024-12-06T10:18:02,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742174_1350 (size=12301) 2024-12-06T10:18:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T10:18:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:02,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480342800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480342903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:02,906 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/5424735984eb44cb90cd6ea439e0c6a2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5424735984eb44cb90cd6ea439e0c6a2 2024-12-06T10:18:02,911 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8ce1a53d35d241bfb2066fa64ad2aca2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8ce1a53d35d241bfb2066fa64ad2aca2 2024-12-06T10:18:02,911 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into 5424735984eb44cb90cd6ea439e0c6a2(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:02,911 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:02,911 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480282474; duration=0sec 2024-12-06T10:18:02,911 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:02,912 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:02,912 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:02,913 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:02,913 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:02,913 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:02,913 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d5364d6984d042c693510973f2847583, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/a9ffef46b15745dc9f224adfa976051d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3956c661bff84474b73ce38db2ac27d8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.2 K 2024-12-06T10:18:02,914 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d5364d6984d042c693510973f2847583, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480278656 2024-12-06T10:18:02,915 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a9ffef46b15745dc9f224adfa976051d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733480279849 2024-12-06T10:18:02,915 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3956c661bff84474b73ce38db2ac27d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733480280477 2024-12-06T10:18:02,916 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 8ce1a53d35d241bfb2066fa64ad2aca2(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:02,916 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:02,916 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480282474; duration=0sec 2024-12-06T10:18:02,916 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:02,916 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:02,923 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#299 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:02,924 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/64978c8a6c57499d9dc6a8c489558260 is 50, key is test_row_0/C:col10/1733480280482/Put/seqid=0 2024-12-06T10:18:02,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742175_1351 (size=12849) 2024-12-06T10:18:02,936 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/64978c8a6c57499d9dc6a8c489558260 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/64978c8a6c57499d9dc6a8c489558260 2024-12-06T10:18:02,942 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 64978c8a6c57499d9dc6a8c489558260(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:02,942 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:02,942 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480282474; duration=0sec 2024-12-06T10:18:02,942 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:02,942 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:03,009 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/57a2481bb78a4472a5b317cdd61b95aa 2024-12-06T10:18:03,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/73b70a4a1a794274bfa1f5c897df123a is 50, key is test_row_0/B:col10/1733480281648/Put/seqid=0 2024-12-06T10:18:03,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742176_1352 (size=12301) 2024-12-06T10:18:03,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480343107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:03,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T10:18:03,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:03,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480343409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:03,423 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/73b70a4a1a794274bfa1f5c897df123a 2024-12-06T10:18:03,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/fdc2cd6c21da45fd8cba709c70074e47 is 50, key is test_row_0/C:col10/1733480281648/Put/seqid=0 2024-12-06T10:18:03,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742177_1353 (size=12301) 2024-12-06T10:18:03,465 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/fdc2cd6c21da45fd8cba709c70074e47 2024-12-06T10:18:03,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/57a2481bb78a4472a5b317cdd61b95aa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/57a2481bb78a4472a5b317cdd61b95aa 2024-12-06T10:18:03,474 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/57a2481bb78a4472a5b317cdd61b95aa, entries=150, sequenceid=283, filesize=12.0 K 2024-12-06T10:18:03,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/73b70a4a1a794274bfa1f5c897df123a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/73b70a4a1a794274bfa1f5c897df123a 2024-12-06T10:18:03,479 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/73b70a4a1a794274bfa1f5c897df123a, entries=150, sequenceid=283, filesize=12.0 K 2024-12-06T10:18:03,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/fdc2cd6c21da45fd8cba709c70074e47 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/fdc2cd6c21da45fd8cba709c70074e47 2024-12-06T10:18:03,484 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/fdc2cd6c21da45fd8cba709c70074e47, entries=150, sequenceid=283, filesize=12.0 K 2024-12-06T10:18:03,485 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d2fcaf53b78370d16571065501f9880b in 886ms, sequenceid=283, compaction requested=false 2024-12-06T10:18:03,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:03,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:03,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-06T10:18:03,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-06T10:18:03,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-06T10:18:03,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3450 sec 2024-12-06T10:18:03,489 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.3500 sec 2024-12-06T10:18:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:03,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T10:18:03,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:03,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:03,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:03,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:03,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:03,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:03,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/1a383231d6904ffe8d9e0ac10941cbc2 is 50, key is test_row_0/A:col10/1733480282792/Put/seqid=0 2024-12-06T10:18:03,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742178_1354 (size=12301) 2024-12-06T10:18:03,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:03,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 309 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480343974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:04,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480344077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T10:18:04,244 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-06T10:18:04,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-06T10:18:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T10:18:04,246 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:04,247 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:04,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:04,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480344280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/1a383231d6904ffe8d9e0ac10941cbc2 2024-12-06T10:18:04,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/143ebfc7f0714c23ba767d1891bc8467 is 50, key is test_row_0/B:col10/1733480282792/Put/seqid=0 2024-12-06T10:18:04,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T10:18:04,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742179_1355 (size=12301) 2024-12-06T10:18:04,399 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:04,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:04,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:04,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T10:18:04,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:04,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 315 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480344583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:04,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:04,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,705 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:04,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/143ebfc7f0714c23ba767d1891bc8467 2024-12-06T10:18:04,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/5fd189542edb4c67a468a1578c72128c is 50, key is test_row_0/C:col10/1733480282792/Put/seqid=0 2024-12-06T10:18:04,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742180_1356 (size=12301) 2024-12-06T10:18:04,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:04,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:04,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:04,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:04,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:04,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:04,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T10:18:05,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:05,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:05,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:05,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:05,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 317 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480345088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:05,163 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:05,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:05,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/5fd189542edb4c67a468a1578c72128c 2024-12-06T10:18:05,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/1a383231d6904ffe8d9e0ac10941cbc2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/1a383231d6904ffe8d9e0ac10941cbc2 2024-12-06T10:18:05,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/1a383231d6904ffe8d9e0ac10941cbc2, entries=150, sequenceid=299, filesize=12.0 K 2024-12-06T10:18:05,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/143ebfc7f0714c23ba767d1891bc8467 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/143ebfc7f0714c23ba767d1891bc8467 2024-12-06T10:18:05,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/143ebfc7f0714c23ba767d1891bc8467, entries=150, 
sequenceid=299, filesize=12.0 K 2024-12-06T10:18:05,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/5fd189542edb4c67a468a1578c72128c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5fd189542edb4c67a468a1578c72128c 2024-12-06T10:18:05,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5fd189542edb4c67a468a1578c72128c, entries=150, sequenceid=299, filesize=12.0 K 2024-12-06T10:18:05,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d2fcaf53b78370d16571065501f9880b in 1276ms, sequenceid=299, compaction requested=true 2024-12-06T10:18:05,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:05,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:05,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:05,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:05,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:05,193 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:05,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:05,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:18:05,193 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:05,193 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37451 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:05,194 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:05,194 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
37451 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:05,194 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,194 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:05,194 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,194 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8ce1a53d35d241bfb2066fa64ad2aca2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/57a2481bb78a4472a5b317cdd61b95aa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/1a383231d6904ffe8d9e0ac10941cbc2] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.6 K 2024-12-06T10:18:05,194 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5424735984eb44cb90cd6ea439e0c6a2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/73b70a4a1a794274bfa1f5c897df123a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/143ebfc7f0714c23ba767d1891bc8467] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.6 K 2024-12-06T10:18:05,194 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ce1a53d35d241bfb2066fa64ad2aca2, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733480280477 2024-12-06T10:18:05,194 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5424735984eb44cb90cd6ea439e0c6a2, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733480280477 2024-12-06T10:18:05,195 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57a2481bb78a4472a5b317cdd61b95aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733480281648 2024-12-06T10:18:05,195 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 73b70a4a1a794274bfa1f5c897df123a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733480281648 2024-12-06T10:18:05,195 DEBUG 
[RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a383231d6904ffe8d9e0ac10941cbc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733480282792 2024-12-06T10:18:05,195 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 143ebfc7f0714c23ba767d1891bc8467, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733480282792 2024-12-06T10:18:05,207 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:05,207 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/db595ed82f7840618906dbbb66072c58 is 50, key is test_row_0/A:col10/1733480282792/Put/seqid=0 2024-12-06T10:18:05,210 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#306 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:05,210 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/cbb1c0517fb2402f89b8234250228e69 is 50, key is test_row_0/B:col10/1733480282792/Put/seqid=0 2024-12-06T10:18:05,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742181_1357 (size=13051) 2024-12-06T10:18:05,216 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/db595ed82f7840618906dbbb66072c58 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/db595ed82f7840618906dbbb66072c58 2024-12-06T10:18:05,220 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into db595ed82f7840618906dbbb66072c58(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:05,221 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:05,221 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480285192; duration=0sec 2024-12-06T10:18:05,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742182_1358 (size=13051) 2024-12-06T10:18:05,221 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:05,221 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:05,221 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:05,222 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37451 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:05,222 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:05,222 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:05,222 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/64978c8a6c57499d9dc6a8c489558260, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/fdc2cd6c21da45fd8cba709c70074e47, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5fd189542edb4c67a468a1578c72128c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.6 K 2024-12-06T10:18:05,223 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64978c8a6c57499d9dc6a8c489558260, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733480280477 2024-12-06T10:18:05,223 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdc2cd6c21da45fd8cba709c70074e47, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733480281648 2024-12-06T10:18:05,223 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fd189542edb4c67a468a1578c72128c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733480282792 2024-12-06T10:18:05,229 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#307 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:05,229 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/47480a6b8dd3418dae645bcd7f339632 is 50, key is test_row_0/C:col10/1733480282792/Put/seqid=0 2024-12-06T10:18:05,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742183_1359 (size=13051) 2024-12-06T10:18:05,237 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/47480a6b8dd3418dae645bcd7f339632 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/47480a6b8dd3418dae645bcd7f339632 2024-12-06T10:18:05,243 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 47480a6b8dd3418dae645bcd7f339632(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
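The compactions of A, B and C above were queued automatically by the flusher (CompactSplit "Add compact mark" entries); a compaction can also be requested explicitly through the public Admin API. A minimal sketch, assuming a reachable cluster and the test's table name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);      // request a minor compaction, like the ones logged here
      admin.majorCompact(table); // rewrite all store files into one per store
    }
  }
}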
2024-12-06T10:18:05,243 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:05,243 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480285193; duration=0sec 2024-12-06T10:18:05,243 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:05,243 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:05,317 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:05,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T10:18:05,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:05,318 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:18:05,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:05,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:05,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:05,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:05,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:05,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:05,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/60d3e6f72a64444c99021150c8ebb73e is 50, key is test_row_0/A:col10/1733480283960/Put/seqid=0 2024-12-06T10:18:05,335 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742184_1360 (size=12301) 2024-12-06T10:18:05,337 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/60d3e6f72a64444c99021150c8ebb73e 2024-12-06T10:18:05,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c32ae520826f4cc9978467326ab0f4c2 is 50, key is test_row_0/B:col10/1733480283960/Put/seqid=0 2024-12-06T10:18:05,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742185_1361 (size=12301) 2024-12-06T10:18:05,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T10:18:05,627 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/cbb1c0517fb2402f89b8234250228e69 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/cbb1c0517fb2402f89b8234250228e69 2024-12-06T10:18:05,631 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into cbb1c0517fb2402f89b8234250228e69(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:05,631 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:05,631 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480285193; duration=0sec 2024-12-06T10:18:05,631 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:05,631 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:05,757 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c32ae520826f4cc9978467326ab0f4c2 2024-12-06T10:18:05,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/42edaba8a4f94ce9a79c89ff97eea1f5 is 50, key is test_row_0/C:col10/1733480283960/Put/seqid=0 2024-12-06T10:18:05,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742186_1362 (size=12301) 2024-12-06T10:18:05,772 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/42edaba8a4f94ce9a79c89ff97eea1f5 2024-12-06T10:18:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/60d3e6f72a64444c99021150c8ebb73e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60d3e6f72a64444c99021150c8ebb73e 2024-12-06T10:18:05,781 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60d3e6f72a64444c99021150c8ebb73e, entries=150, sequenceid=324, filesize=12.0 K 2024-12-06T10:18:05,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c32ae520826f4cc9978467326ab0f4c2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c32ae520826f4cc9978467326ab0f4c2 2024-12-06T10:18:05,798 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c32ae520826f4cc9978467326ab0f4c2, entries=150, sequenceid=324, filesize=12.0 K 2024-12-06T10:18:05,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/42edaba8a4f94ce9a79c89ff97eea1f5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/42edaba8a4f94ce9a79c89ff97eea1f5 2024-12-06T10:18:05,805 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/42edaba8a4f94ce9a79c89ff97eea1f5, entries=150, sequenceid=324, filesize=12.0 K 2024-12-06T10:18:05,806 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for d2fcaf53b78370d16571065501f9880b in 488ms, sequenceid=324, compaction requested=false 2024-12-06T10:18:05,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:05,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
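At this point the flush has written each family's data to a .tmp HFile, committed it into the A/B/C store directories, and recorded sequenceid=324. In the spirit of TestAcidGuarantees, a previously written row must remain readable once its value lives in the flushed HFile rather than the memstore. An illustrative read-back sketch; the row key and A:col10 column follow the test's key pattern seen in this log, everything else is an assumption:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Get get = new Get(Bytes.toBytes("test_row_0"));
      get.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      Result result = table.get(get);
      // The value now comes from the flushed HFile instead of the memstore,
      // but the read path hides that distinction from the client.
      System.out.println("A:col10 present = " + !result.isEmpty());
    }
  }
}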
2024-12-06T10:18:05,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-06T10:18:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-06T10:18:05,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-06T10:18:05,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5610 sec 2024-12-06T10:18:05,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.5650 sec 2024-12-06T10:18:06,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:06,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:18:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:06,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/60ed4899d70347069f82821b5d63c3ed is 50, key is test_row_0/A:col10/1733480286105/Put/seqid=0 2024-12-06T10:18:06,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742187_1363 (size=12301) 2024-12-06T10:18:06,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 349 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480346163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 351 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480346266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T10:18:06,363 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-06T10:18:06,365 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-06T10:18:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T10:18:06,367 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:06,367 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:06,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:06,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T10:18:06,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 353 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480346470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-06T10:18:06,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:06,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
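The RegionTooBusyException warnings ("Over memstore limit=512.0 K") are the region pushing back on writers once its memstore exceeds the blocking limit, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small 512 K figure comes from this test's deliberately low settings, the exact values of which are not shown here. The standard HBase client retries this exception on its own; the sketch below only makes that back-off explicit. Retry counts and sleep times are assumptions:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops back below the blocking limit
        } catch (RegionTooBusyException e) {
          // Memstore is above flush.size * block.multiplier; wait for the
          // in-flight flush (like the one logged above) to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}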
2024-12-06T10:18:06,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/60ed4899d70347069f82821b5d63c3ed 2024-12-06T10:18:06,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/da3fab07135140f9a76d3c36edda0c55 is 50, key is test_row_0/B:col10/1733480286105/Put/seqid=0 2024-12-06T10:18:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742188_1364 (size=12301) 2024-12-06T10:18:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T10:18:06,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-06T10:18:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:06,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 355 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480346773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-06T10:18:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:06,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:06,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/da3fab07135140f9a76d3c36edda0c55 2024-12-06T10:18:06,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/5a6cbb35aadc483abdca1eebeed2e6ac is 50, key is test_row_0/C:col10/1733480286105/Put/seqid=0 2024-12-06T10:18:06,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742189_1365 (size=12301) 2024-12-06T10:18:06,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/5a6cbb35aadc483abdca1eebeed2e6ac 2024-12-06T10:18:06,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/60ed4899d70347069f82821b5d63c3ed as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60ed4899d70347069f82821b5d63c3ed 2024-12-06T10:18:06,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60ed4899d70347069f82821b5d63c3ed, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T10:18:06,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/da3fab07135140f9a76d3c36edda0c55 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/da3fab07135140f9a76d3c36edda0c55 2024-12-06T10:18:06,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/da3fab07135140f9a76d3c36edda0c55, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T10:18:06,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T10:18:06,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/5a6cbb35aadc483abdca1eebeed2e6ac as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5a6cbb35aadc483abdca1eebeed2e6ac 2024-12-06T10:18:06,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5a6cbb35aadc483abdca1eebeed2e6ac, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T10:18:06,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2fcaf53b78370d16571065501f9880b in 862ms, sequenceid=336, compaction requested=true 2024-12-06T10:18:06,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:06,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:06,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:06,975 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:06,975 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:06,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:06,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:06,976 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:06,976 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:06,976 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:06,976 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:06,976 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,976 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:06,976 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/db595ed82f7840618906dbbb66072c58, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60d3e6f72a64444c99021150c8ebb73e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60ed4899d70347069f82821b5d63c3ed] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.8 K 2024-12-06T10:18:06,976 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/cbb1c0517fb2402f89b8234250228e69, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c32ae520826f4cc9978467326ab0f4c2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/da3fab07135140f9a76d3c36edda0c55] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.8 K 2024-12-06T10:18:06,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:06,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:06,977 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting db595ed82f7840618906dbbb66072c58, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733480282792 2024-12-06T10:18:06,977 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cbb1c0517fb2402f89b8234250228e69, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733480282792 2024-12-06T10:18:06,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:06,978 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60d3e6f72a64444c99021150c8ebb73e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733480283960 2024-12-06T10:18:06,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-06T10:18:06,978 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c32ae520826f4cc9978467326ab0f4c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733480283960 2024-12-06T10:18:06,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:06,978 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:18:06,978 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60ed4899d70347069f82821b5d63c3ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480286105 2024-12-06T10:18:06,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:06,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:06,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:06,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:06,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:06,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:06,980 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting da3fab07135140f9a76d3c36edda0c55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480286105 2024-12-06T10:18:06,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/acd0796d1bef4b589c1ed0fa9655fd07 is 50, key is test_row_0/A:col10/1733480286162/Put/seqid=0 2024-12-06T10:18:06,994 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#315 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:06,995 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4401bacda07a4a0ead744c896fa74916 is 50, key is test_row_0/B:col10/1733480286105/Put/seqid=0 2024-12-06T10:18:06,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742190_1366 (size=12301) 2024-12-06T10:18:06,996 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/acd0796d1bef4b589c1ed0fa9655fd07 2024-12-06T10:18:07,002 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#316 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:07,002 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/a852214f13cd42d3b8bdd32a4f7cb17c is 50, key is test_row_0/A:col10/1733480286105/Put/seqid=0 2024-12-06T10:18:07,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742191_1367 (size=13153) 2024-12-06T10:18:07,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/60b12ed22a2e45beac5599b864b46952 is 50, key is test_row_0/B:col10/1733480286162/Put/seqid=0 2024-12-06T10:18:07,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742192_1368 (size=13153) 2024-12-06T10:18:07,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742193_1369 (size=12301) 2024-12-06T10:18:07,067 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/60b12ed22a2e45beac5599b864b46952 2024-12-06T10:18:07,071 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/a852214f13cd42d3b8bdd32a4f7cb17c as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/a852214f13cd42d3b8bdd32a4f7cb17c 2024-12-06T10:18:07,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/865d89c489074c8db6d6c3824a21fc1f is 50, key is test_row_0/C:col10/1733480286162/Put/seqid=0 2024-12-06T10:18:07,081 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into a852214f13cd42d3b8bdd32a4f7cb17c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:07,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,081 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480286975; duration=0sec 2024-12-06T10:18:07,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:07,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:07,081 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:07,082 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:07,082 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:07,082 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:07,083 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/47480a6b8dd3418dae645bcd7f339632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/42edaba8a4f94ce9a79c89ff97eea1f5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5a6cbb35aadc483abdca1eebeed2e6ac] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.8 K 2024-12-06T10:18:07,083 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47480a6b8dd3418dae645bcd7f339632, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733480282792 2024-12-06T10:18:07,083 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42edaba8a4f94ce9a79c89ff97eea1f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733480283960 2024-12-06T10:18:07,084 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a6cbb35aadc483abdca1eebeed2e6ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480286105 2024-12-06T10:18:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742194_1370 (size=12301) 2024-12-06T10:18:07,100 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:07,101 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/865d89c489074c8db6d6c3824a21fc1f 2024-12-06T10:18:07,101 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cc387f5eadde4d1680e53b7d6698e002 is 50, key is test_row_0/C:col10/1733480286105/Put/seqid=0 2024-12-06T10:18:07,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/acd0796d1bef4b589c1ed0fa9655fd07 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/acd0796d1bef4b589c1ed0fa9655fd07 2024-12-06T10:18:07,111 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/acd0796d1bef4b589c1ed0fa9655fd07, entries=150, sequenceid=361, filesize=12.0 K 2024-12-06T10:18:07,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/60b12ed22a2e45beac5599b864b46952 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/60b12ed22a2e45beac5599b864b46952 2024-12-06T10:18:07,117 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/60b12ed22a2e45beac5599b864b46952, entries=150, sequenceid=361, filesize=12.0 K 2024-12-06T10:18:07,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/865d89c489074c8db6d6c3824a21fc1f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/865d89c489074c8db6d6c3824a21fc1f 2024-12-06T10:18:07,124 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/865d89c489074c8db6d6c3824a21fc1f, entries=150, sequenceid=361, filesize=12.0 K 2024-12-06T10:18:07,125 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for d2fcaf53b78370d16571065501f9880b in 147ms, sequenceid=361, compaction requested=false 2024-12-06T10:18:07,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-06T10:18:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-06T10:18:07,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742195_1371 (size=13153) 2024-12-06T10:18:07,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-06T10:18:07,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 759 msec 2024-12-06T10:18:07,131 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 764 msec 2024-12-06T10:18:07,131 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cc387f5eadde4d1680e53b7d6698e002 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cc387f5eadde4d1680e53b7d6698e002 2024-12-06T10:18:07,137 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into cc387f5eadde4d1680e53b7d6698e002(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:07,137 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,137 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480286975; duration=0sec 2024-12-06T10:18:07,137 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:07,137 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:07,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:07,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:18:07,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:07,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:07,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:07,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:07,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:07,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:07,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/70d3db0edaa54f8ea48e1e023cdd2fd0 is 50, key is test_row_0/A:col10/1733480287291/Put/seqid=0 2024-12-06T10:18:07,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742196_1372 (size=14741) 2024-12-06T10:18:07,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/70d3db0edaa54f8ea48e1e023cdd2fd0 2024-12-06T10:18:07,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc is 50, key is test_row_0/B:col10/1733480287291/Put/seqid=0 2024-12-06T10:18:07,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742197_1373 (size=12301) 
2024-12-06T10:18:07,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc 2024-12-06T10:18:07,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/2231299032634c46a01c033e9412438e is 50, key is test_row_0/C:col10/1733480287291/Put/seqid=0 2024-12-06T10:18:07,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742198_1374 (size=12301) 2024-12-06T10:18:07,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/2231299032634c46a01c033e9412438e 2024-12-06T10:18:07,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/70d3db0edaa54f8ea48e1e023cdd2fd0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/70d3db0edaa54f8ea48e1e023cdd2fd0 2024-12-06T10:18:07,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/70d3db0edaa54f8ea48e1e023cdd2fd0, entries=200, sequenceid=374, filesize=14.4 K 2024-12-06T10:18:07,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc 2024-12-06T10:18:07,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc, entries=150, sequenceid=374, filesize=12.0 K 2024-12-06T10:18:07,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/2231299032634c46a01c033e9412438e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2231299032634c46a01c033e9412438e 2024-12-06T10:18:07,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2231299032634c46a01c033e9412438e, entries=150, sequenceid=374, filesize=12.0 K 2024-12-06T10:18:07,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=127.47 KB/130530 for d2fcaf53b78370d16571065501f9880b in 58ms, sequenceid=374, compaction requested=true 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:07,350 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:07,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:18:07,351 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:07,351 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:07,351 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:07,351 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/a852214f13cd42d3b8bdd32a4f7cb17c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/acd0796d1bef4b589c1ed0fa9655fd07, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/70d3db0edaa54f8ea48e1e023cdd2fd0] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=39.3 K 2024-12-06T10:18:07,353 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting a852214f13cd42d3b8bdd32a4f7cb17c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480286105 2024-12-06T10:18:07,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:07,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:18:07,353 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting acd0796d1bef4b589c1ed0fa9655fd07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1733480286157 2024-12-06T10:18:07,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:07,353 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70d3db0edaa54f8ea48e1e023cdd2fd0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733480287280 2024-12-06T10:18:07,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:07,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:07,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:07,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:07,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:07,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/71a531f0196b473b9f8d37233720953e is 50, key is test_row_0/A:col10/1733480287352/Put/seqid=0 2024-12-06T10:18:07,364 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:07,364 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/67d54b9fa4d84b898ba13044e0c7a876 is 50, key is test_row_0/A:col10/1733480287291/Put/seqid=0 2024-12-06T10:18:07,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742199_1375 (size=12301) 2024-12-06T10:18:07,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742200_1376 (size=13255) 2024-12-06T10:18:07,374 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/67d54b9fa4d84b898ba13044e0c7a876 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/67d54b9fa4d84b898ba13044e0c7a876 2024-12-06T10:18:07,385 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 67d54b9fa4d84b898ba13044e0c7a876(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:07,385 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,385 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=13, startTime=1733480287350; duration=0sec 2024-12-06T10:18:07,386 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:18:07,386 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:07,386 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-12-06T10:18:07,387 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:18:07,387 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-06T10:18:07,387 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
because compaction request was cancelled 2024-12-06T10:18:07,387 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:07,387 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:07,389 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:07,389 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:07,389 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,389 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cc387f5eadde4d1680e53b7d6698e002, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/865d89c489074c8db6d6c3824a21fc1f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2231299032634c46a01c033e9412438e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=36.9 K 2024-12-06T10:18:07,389 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc387f5eadde4d1680e53b7d6698e002, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480286105 2024-12-06T10:18:07,390 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 865d89c489074c8db6d6c3824a21fc1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1733480286157 2024-12-06T10:18:07,390 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2231299032634c46a01c033e9412438e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733480287280 2024-12-06T10:18:07,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:07,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 395 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480347393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:07,398 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#325 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:07,399 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/4f25066c47774849b1c4a292b3f1272c is 50, key is test_row_0/C:col10/1733480287291/Put/seqid=0 2024-12-06T10:18:07,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742201_1377 (size=13255) 2024-12-06T10:18:07,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4401bacda07a4a0ead744c896fa74916 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4401bacda07a4a0ead744c896fa74916 2024-12-06T10:18:07,457 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into 4401bacda07a4a0ead744c896fa74916(size=12.8 K), total size for store is 36.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
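The RegionTooBusyException entries above show the region server rejecting mutations once the region's memstore passes its blocking limit (512.0 K here). Below is a minimal, hedged sketch of a client riding out that condition with backoff. Only the public HBase client classes (Connection, Table, Put, TableName, RegionTooBusyException) are real API; the class name, retry count, and sleep values are illustrative assumptions, and the stock client may already retry this exception internally depending on hbase.client.retries.number.

```java
// Hedged sketch only: a client-side backoff loop for the RegionTooBusyException
// logged above. Class name, retry count, and sleep values are assumptions.
import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPut {
  // Retries a single put while the region reports its memstore is over the
  // blocking limit, sleeping with exponential backoff between attempts.
  static void putWithBackoff(Connection conn, TableName table, Put put)
      throws IOException, InterruptedException {
    long sleepMs = 100;
    try (Table t = conn.getTable(table)) {
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          t.put(put);
          return;
        } catch (RegionTooBusyException busy) {
          // Region is above its memstore blocking limit; give the flush time to drain it.
          Thread.sleep(sleepMs);
          sleepMs = Math.min(sleepMs * 2, 5_000L);
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }
}
```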
2024-12-06T10:18:07,457 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,457 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480286975; duration=0sec 2024-12-06T10:18:07,457 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:07,457 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T10:18:07,470 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-06T10:18:07,471 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-06T10:18:07,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T10:18:07,472 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:07,473 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:07,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:07,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 397 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480347496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:07,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T10:18:07,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:07,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
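The FLUSH procedures above (procId 87 completed, then pid=89 with subprocedure pid=90 queued while the region is already flushing) are driven by an administrative flush request. A hedged sketch of the client call that creates such a FlushTableProcedure follows; only the public Admin/Connection API is real, while the class name and the use of a default HBaseConfiguration are assumptions for illustration.

```java
// Sketch of the administrative call behind the FlushTableProcedure (pid=89) above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The client-side future behind this call polls the master, which is what the
      // repeated "Checking to see if procedure is done pid=89" lines correspond to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```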
2024-12-06T10:18:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:07,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:07,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 399 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480347700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:07,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/71a531f0196b473b9f8d37233720953e 2024-12-06T10:18:07,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T10:18:07,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/74bc2abc9c2c48e288e8bd7c83c67ea8 is 50, key is test_row_0/B:col10/1733480287352/Put/seqid=0 2024-12-06T10:18:07,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:07,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:07,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742202_1378 (size=12301) 2024-12-06T10:18:07,816 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/4f25066c47774849b1c4a292b3f1272c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/4f25066c47774849b1c4a292b3f1272c 2024-12-06T10:18:07,826 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 4f25066c47774849b1c4a292b3f1272c(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:07,826 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:07,826 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=13, startTime=1733480287350; duration=0sec 2024-12-06T10:18:07,826 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:07,826 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:07,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:07,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:07,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
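The compaction records above show ExploringCompactionPolicy picking three HFiles per store and declining to start another minor compaction while fewer than three files are eligible ("Need 3 to initiate"). The sketch below lists the standard configuration keys behind that selection; the values shown are the usual defaults, not values read from this test's configuration.

```java
// Hedged sketch of the store-compaction selection knobs behind the
// "Need 3 to initiate" / ExploringCompactionPolicy lines above.
// Values are illustrative defaults, not taken from this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionSelectionConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may include.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // File-size ratio used when scoring candidate file permutations.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    return conf;
  }
}
```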
2024-12-06T10:18:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:08,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 401 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480348003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T10:18:08,083 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:08,084 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/74bc2abc9c2c48e288e8bd7c83c67ea8 2024-12-06T10:18:08,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/d7aaaf0378814f2d92130d4ccc41b866 is 50, key is test_row_0/C:col10/1733480287352/Put/seqid=0 2024-12-06T10:18:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742203_1379 (size=12301) 2024-12-06T10:18:08,236 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:08,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
as already flushing 2024-12-06T10:18:08,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:08,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:08,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:08,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 403 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480348508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:08,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:08,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
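The repeated "Over memstore limit=512.0 K" warnings reflect the region's memstore blocking threshold, which is the memstore flush size multiplied by the block multiplier. The property names in the sketch below are standard HBase keys; the concrete values (128 KB x 4 = 512 KB) are an assumption chosen only to match the figure in this log, not values read from the test.

```java
// Hedged sketch: configuration that would yield the 512 KB memstore blocking
// limit seen in the RegionTooBusyException messages. Property names are real
// HBase keys; the values are assumptions for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SmallMemstoreConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block writes with RegionTooBusyException at 4x that size (512 KB).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```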
2024-12-06T10:18:08,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:08,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T10:18:08,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/d7aaaf0378814f2d92130d4ccc41b866 2024-12-06T10:18:08,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/71a531f0196b473b9f8d37233720953e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/71a531f0196b473b9f8d37233720953e 2024-12-06T10:18:08,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/71a531f0196b473b9f8d37233720953e, entries=150, sequenceid=397, filesize=12.0 K 2024-12-06T10:18:08,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/74bc2abc9c2c48e288e8bd7c83c67ea8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/74bc2abc9c2c48e288e8bd7c83c67ea8 2024-12-06T10:18:08,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/74bc2abc9c2c48e288e8bd7c83c67ea8, entries=150, sequenceid=397, filesize=12.0 K 2024-12-06T10:18:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/d7aaaf0378814f2d92130d4ccc41b866 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d7aaaf0378814f2d92130d4ccc41b866 2024-12-06T10:18:08,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d7aaaf0378814f2d92130d4ccc41b866, entries=150, sequenceid=397, filesize=12.0 K 2024-12-06T10:18:08,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d2fcaf53b78370d16571065501f9880b in 1257ms, sequenceid=397, compaction requested=true 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:08,610 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:08,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:08,610 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:08,610 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. because compaction request was cancelled 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
because compaction request was cancelled 2024-12-06T10:18:08,611 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:08,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:08,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:08,612 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,612 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4401bacda07a4a0ead744c896fa74916, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/60b12ed22a2e45beac5599b864b46952, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/74bc2abc9c2c48e288e8bd7c83c67ea8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=48.9 K 2024-12-06T10:18:08,612 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4401bacda07a4a0ead744c896fa74916, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733480286105 2024-12-06T10:18:08,613 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 60b12ed22a2e45beac5599b864b46952, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1733480286157 2024-12-06T10:18:08,613 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b1f1baf2f4a44cb080e1ab0fdfe8fcbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733480287280 2024-12-06T10:18:08,614 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 74bc2abc9c2c48e288e8bd7c83c67ea8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1733480287344 2024-12-06T10:18:08,622 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#328 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:08,623 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/bb2a8d95a9a94c60b49d27fd1607c5b0 is 50, key is test_row_0/B:col10/1733480287352/Put/seqid=0 2024-12-06T10:18:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742204_1380 (size=13289) 2024-12-06T10:18:08,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:08,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T10:18:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:08,696 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T10:18:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:08,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:08,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:08,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:08,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:08,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/c83e5815ee9b41669bbd71e57f0ed4a5 is 50, key is test_row_0/A:col10/1733480287388/Put/seqid=0 2024-12-06T10:18:08,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742205_1381 (size=12301) 2024-12-06T10:18:09,034 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/bb2a8d95a9a94c60b49d27fd1607c5b0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/bb2a8d95a9a94c60b49d27fd1607c5b0 2024-12-06T10:18:09,040 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into bb2a8d95a9a94c60b49d27fd1607c5b0(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:09,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:09,040 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=12, startTime=1733480288610; duration=0sec 2024-12-06T10:18:09,041 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:09,041 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:09,108 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/c83e5815ee9b41669bbd71e57f0ed4a5 2024-12-06T10:18:09,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/77235a7fd99b4403b49ae521e305cb1e is 50, key is test_row_0/B:col10/1733480287388/Put/seqid=0 2024-12-06T10:18:09,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742206_1382 (size=12301) 2024-12-06T10:18:09,125 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/77235a7fd99b4403b49ae521e305cb1e 2024-12-06T10:18:09,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/895dc08dbab44673ba46b783956ba7a5 is 50, key is test_row_0/C:col10/1733480287388/Put/seqid=0 2024-12-06T10:18:09,154 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742207_1383 (size=12301) 2024-12-06T10:18:09,154 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/895dc08dbab44673ba46b783956ba7a5 2024-12-06T10:18:09,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/c83e5815ee9b41669bbd71e57f0ed4a5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c83e5815ee9b41669bbd71e57f0ed4a5 2024-12-06T10:18:09,178 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c83e5815ee9b41669bbd71e57f0ed4a5, entries=150, sequenceid=413, filesize=12.0 K 2024-12-06T10:18:09,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/77235a7fd99b4403b49ae521e305cb1e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/77235a7fd99b4403b49ae521e305cb1e 2024-12-06T10:18:09,183 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/77235a7fd99b4403b49ae521e305cb1e, entries=150, sequenceid=413, filesize=12.0 K 2024-12-06T10:18:09,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/895dc08dbab44673ba46b783956ba7a5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/895dc08dbab44673ba46b783956ba7a5 2024-12-06T10:18:09,188 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/895dc08dbab44673ba46b783956ba7a5, entries=150, sequenceid=413, filesize=12.0 K 2024-12-06T10:18:09,189 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, 
heapSize ~176.48 KB/180720, currentSize=0 B/0 for d2fcaf53b78370d16571065501f9880b in 493ms, sequenceid=413, compaction requested=true 2024-12-06T10:18:09,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:09,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:09,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-06T10:18:09,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-06T10:18:09,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-06T10:18:09,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7170 sec 2024-12-06T10:18:09,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.7210 sec 2024-12-06T10:18:09,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:09,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:18:09,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:09,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:09,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:09,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:09,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:09,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:09,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/633b4122011a41f1a2c812d1a78f8cfb is 50, key is test_row_0/A:col10/1733480289547/Put/seqid=0 2024-12-06T10:18:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T10:18:09,577 INFO [Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-06T10:18:09,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:09,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742208_1384 (size=14741) 2024-12-06T10:18:09,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/633b4122011a41f1a2c812d1a78f8cfb 2024-12-06T10:18:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-06T10:18:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:09,582 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:09,582 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:09,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:09,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c05843223b8749e6bad1b4dfce007a30 is 50, key is test_row_0/B:col10/1733480289547/Put/seqid=0 2024-12-06T10:18:09,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742209_1385 (size=12301) 2024-12-06T10:18:09,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c05843223b8749e6bad1b4dfce007a30 2024-12-06T10:18:09,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 435 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480349657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:09,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b7fe98bfe5c14338ae9fe1af1726d224 is 50, key is test_row_0/C:col10/1733480289547/Put/seqid=0 2024-12-06T10:18:09,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:09,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742210_1386 (size=12301) 2024-12-06T10:18:09,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b7fe98bfe5c14338ae9fe1af1726d224 2024-12-06T10:18:09,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/633b4122011a41f1a2c812d1a78f8cfb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/633b4122011a41f1a2c812d1a78f8cfb 2024-12-06T10:18:09,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/633b4122011a41f1a2c812d1a78f8cfb, entries=200, sequenceid=425, filesize=14.4 K 2024-12-06T10:18:09,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/c05843223b8749e6bad1b4dfce007a30 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c05843223b8749e6bad1b4dfce007a30 2024-12-06T10:18:09,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 
2024-12-06T10:18:09,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:09,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:09,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c05843223b8749e6bad1b4dfce007a30, entries=150, sequenceid=425, filesize=12.0 K 2024-12-06T10:18:09,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:09,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:09,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:09,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/b7fe98bfe5c14338ae9fe1af1726d224 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b7fe98bfe5c14338ae9fe1af1726d224 2024-12-06T10:18:09,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:09,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b7fe98bfe5c14338ae9fe1af1726d224, entries=150, sequenceid=425, filesize=12.0 K 2024-12-06T10:18:09,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2fcaf53b78370d16571065501f9880b in 193ms, sequenceid=425, compaction requested=true 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:09,742 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:09,742 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:09,744 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:09,744 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:09,744 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:09,744 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/bb2a8d95a9a94c60b49d27fd1607c5b0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/77235a7fd99b4403b49ae521e305cb1e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c05843223b8749e6bad1b4dfce007a30] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=37.0 K 2024-12-06T10:18:09,744 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52598 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:09,744 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting bb2a8d95a9a94c60b49d27fd1607c5b0, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1733480287344 2024-12-06T10:18:09,744 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:09,744 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:09,745 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/67d54b9fa4d84b898ba13044e0c7a876, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/71a531f0196b473b9f8d37233720953e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c83e5815ee9b41669bbd71e57f0ed4a5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/633b4122011a41f1a2c812d1a78f8cfb] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=51.4 K 2024-12-06T10:18:09,745 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67d54b9fa4d84b898ba13044e0c7a876, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733480287280 2024-12-06T10:18:09,745 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 77235a7fd99b4403b49ae521e305cb1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733480287369 2024-12-06T10:18:09,746 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71a531f0196b473b9f8d37233720953e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1733480287344 2024-12-06T10:18:09,746 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c05843223b8749e6bad1b4dfce007a30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733480289532 2024-12-06T10:18:09,746 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c83e5815ee9b41669bbd71e57f0ed4a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733480287369 2024-12-06T10:18:09,747 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 633b4122011a41f1a2c812d1a78f8cfb, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733480289532 2024-12-06T10:18:09,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:09,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:18:09,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:09,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:09,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:09,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-06T10:18:09,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:09,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:09,772 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#335 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:09,773 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4f606fd6699d4154b063b5a9d6f3dc20 is 50, key is test_row_0/B:col10/1733480289547/Put/seqid=0 2024-12-06T10:18:09,777 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:09,777 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8cac81a5fbff41ceba93bdae284d45ca is 50, key is test_row_0/A:col10/1733480289547/Put/seqid=0 2024-12-06T10:18:09,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/21f9f8e2aa9547b7a1381c695c063d2c is 50, key is test_row_0/A:col10/1733480289763/Put/seqid=0 2024-12-06T10:18:09,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:09,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 445 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480349801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742213_1389 (size=14741) 2024-12-06T10:18:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742212_1388 (size=13391) 2024-12-06T10:18:09,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/21f9f8e2aa9547b7a1381c695c063d2c 2024-12-06T10:18:09,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742211_1387 (size=13391) 2024-12-06T10:18:09,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/53f03797a9b84f858aa8d9d881ebaea5 is 50, key is test_row_0/B:col10/1733480289763/Put/seqid=0 2024-12-06T10:18:09,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:09,887 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:09,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:09,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:09,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:09,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 447 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480349905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:09,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742214_1390 (size=12301) 2024-12-06T10:18:10,041 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:10,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:10,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:10,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 449 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480350111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:10,194 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:10,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:10,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:10,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,240 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/8cac81a5fbff41ceba93bdae284d45ca as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8cac81a5fbff41ceba93bdae284d45ca 2024-12-06T10:18:10,246 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 8cac81a5fbff41ceba93bdae284d45ca(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:10,246 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=12, startTime=1733480289742; duration=0sec 2024-12-06T10:18:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:10,248 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:10,248 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:10,248 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,248 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/4f25066c47774849b1c4a292b3f1272c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d7aaaf0378814f2d92130d4ccc41b866, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/895dc08dbab44673ba46b783956ba7a5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b7fe98bfe5c14338ae9fe1af1726d224] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=49.0 K 2024-12-06T10:18:10,249 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f25066c47774849b1c4a292b3f1272c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733480287280 2024-12-06T10:18:10,249 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7aaaf0378814f2d92130d4ccc41b866, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1733480287344 2024-12-06T10:18:10,249 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 895dc08dbab44673ba46b783956ba7a5, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733480287369 2024-12-06T10:18:10,250 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7fe98bfe5c14338ae9fe1af1726d224, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733480289532 2024-12-06T10:18:10,260 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#339 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:10,261 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/2664288a38934ad29c92a1ebea03f2dd is 50, key is test_row_0/C:col10/1733480289547/Put/seqid=0 2024-12-06T10:18:10,271 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/4f606fd6699d4154b063b5a9d6f3dc20 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4f606fd6699d4154b063b5a9d6f3dc20 2024-12-06T10:18:10,278 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into 4f606fd6699d4154b063b5a9d6f3dc20(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:10,278 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:10,278 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=13, startTime=1733480289742; duration=0sec 2024-12-06T10:18:10,279 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:10,279 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:10,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742215_1391 (size=13391) 2024-12-06T10:18:10,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/53f03797a9b84f858aa8d9d881ebaea5 2024-12-06T10:18:10,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/e483ae395c87449a84d7c97a44cb6ca1 is 50, key is test_row_0/C:col10/1733480289763/Put/seqid=0 2024-12-06T10:18:10,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742216_1392 (size=12301) 2024-12-06T10:18:10,347 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:10,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:10,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:10,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 451 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480350415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,502 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:10,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:10,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:10,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,655 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:10,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:10,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:10,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:10,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:10,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:10,701 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/2664288a38934ad29c92a1ebea03f2dd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2664288a38934ad29c92a1ebea03f2dd 2024-12-06T10:18:10,708 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 2664288a38934ad29c92a1ebea03f2dd(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:10,708 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:10,708 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=12, startTime=1733480289742; duration=0sec 2024-12-06T10:18:10,708 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:10,708 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:10,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/e483ae395c87449a84d7c97a44cb6ca1 2024-12-06T10:18:10,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/21f9f8e2aa9547b7a1381c695c063d2c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/21f9f8e2aa9547b7a1381c695c063d2c 2024-12-06T10:18:10,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/21f9f8e2aa9547b7a1381c695c063d2c, entries=200, sequenceid=451, filesize=14.4 K 2024-12-06T10:18:10,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/53f03797a9b84f858aa8d9d881ebaea5 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/53f03797a9b84f858aa8d9d881ebaea5 2024-12-06T10:18:10,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/53f03797a9b84f858aa8d9d881ebaea5, entries=150, sequenceid=451, filesize=12.0 K 2024-12-06T10:18:10,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/e483ae395c87449a84d7c97a44cb6ca1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e483ae395c87449a84d7c97a44cb6ca1 2024-12-06T10:18:10,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e483ae395c87449a84d7c97a44cb6ca1, entries=150, sequenceid=451, filesize=12.0 K 2024-12-06T10:18:10,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2fcaf53b78370d16571065501f9880b in 1014ms, sequenceid=451, compaction requested=false 2024-12-06T10:18:10,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:10,808 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T10:18:10,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:10,809 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-06T10:18:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:10,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/9223034b48e5469db4f66b5f8619c93c is 50, key is test_row_0/A:col10/1733480289799/Put/seqid=0 2024-12-06T10:18:10,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742217_1393 (size=9857) 2024-12-06T10:18:10,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. as already flushing 2024-12-06T10:18:10,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:10,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480350956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480350958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 464 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480350959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1733480350965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,966 DEBUG [Thread-1315 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:10,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:10,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48084 deadline: 1733480350975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:10,976 DEBUG [Thread-1313 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18212 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:11,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480351061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480351062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 466 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480351062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480351266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480351266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 468 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480351267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,270 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/9223034b48e5469db4f66b5f8619c93c 2024-12-06T10:18:11,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/f0e35ee7e5db424c8500f70d5267d777 is 50, key is test_row_0/B:col10/1733480289799/Put/seqid=0 2024-12-06T10:18:11,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742218_1394 (size=9857) 2024-12-06T10:18:11,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480351569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480351570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 470 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480351571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:11,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:11,695 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/f0e35ee7e5db424c8500f70d5267d777 2024-12-06T10:18:11,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/60060ceed41d4f0d9a326be7dd2dba96 is 50, key is test_row_0/C:col10/1733480289799/Put/seqid=0 2024-12-06T10:18:11,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742219_1395 (size=9857) 2024-12-06T10:18:12,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48052 deadline: 1733480352075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:12,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:12,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1733480352076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:12,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:12,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 472 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48160 deadline: 1733480352078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:12,114 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/60060ceed41d4f0d9a326be7dd2dba96 2024-12-06T10:18:12,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/9223034b48e5469db4f66b5f8619c93c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9223034b48e5469db4f66b5f8619c93c 2024-12-06T10:18:12,122 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9223034b48e5469db4f66b5f8619c93c, entries=100, sequenceid=464, filesize=9.6 K 2024-12-06T10:18:12,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/f0e35ee7e5db424c8500f70d5267d777 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/f0e35ee7e5db424c8500f70d5267d777 2024-12-06T10:18:12,128 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/f0e35ee7e5db424c8500f70d5267d777, entries=100, sequenceid=464, filesize=9.6 K 2024-12-06T10:18:12,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/60060ceed41d4f0d9a326be7dd2dba96 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/60060ceed41d4f0d9a326be7dd2dba96 2024-12-06T10:18:12,132 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/60060ceed41d4f0d9a326be7dd2dba96, entries=100, sequenceid=464, filesize=9.6 K 2024-12-06T10:18:12,133 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for d2fcaf53b78370d16571065501f9880b in 1324ms, sequenceid=464, compaction requested=true 2024-12-06T10:18:12,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:12,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
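A minimal client-side sketch of what the retry traces above correspond to, assuming an ordinary HBase 2.x client with illustrative values rather than this test's actual settings: HRegion.checkResources rejects each put with RegionTooBusyException while the memstore is over its blocking limit (512.0 K here), and RpcRetryingCallerImpl retries transparently (tries=8, retries=16 in the traces) until hbase.client.retries.number is exhausted.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client retry knobs driving RpcRetryingCallerImpl (illustrative values,
    // not the values used by the test run captured in this log).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base retry pause in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retries happen inside the client; only when they are exhausted does
        // the failure surface here (typically as a RetriesExhaustedException
        // whose cause chain carries the RegionTooBusyException seen above).
        table.put(put);
      } catch (IOException e) {
        System.err.println("Write gave up after retries: " + e.getMessage());
      }
    }
  }
}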
2024-12-06T10:18:12,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-06T10:18:12,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-06T10:18:12,135 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-06T10:18:12,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5520 sec 2024-12-06T10:18:12,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 2.5570 sec 2024-12-06T10:18:12,740 DEBUG [Thread-1324 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:61610 2024-12-06T10:18:12,740 DEBUG [Thread-1324 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:12,742 DEBUG [Thread-1322 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:61610 2024-12-06T10:18:12,742 DEBUG [Thread-1322 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:12,742 DEBUG [Thread-1328 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:61610 2024-12-06T10:18:12,742 DEBUG [Thread-1328 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:12,743 DEBUG [Thread-1326 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:61610 2024-12-06T10:18:12,743 DEBUG [Thread-1326 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:12,744 DEBUG [Thread-1330 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d1403c3 to 127.0.0.1:61610 2024-12-06T10:18:12,744 DEBUG [Thread-1330 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:13,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
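For reference, the pid=91/pid=92 entries above are a FlushTableProcedure and its per-region FlushRegionProcedure finishing on the master; a flush like this is what the Admin API requests. A minimal sketch, assuming the same table name and an HBase build where Admin.flush is procedure-backed as it is here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every region of the table; on this build the
      // master drives it as a FlushTableProcedure with FlushRegionProcedure
      // children, which is what the "Finished pid=..." entries report.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}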
2024-12-06T10:18:13,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:13,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-06T10:18:13,081 DEBUG [Thread-1319 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:61610 2024-12-06T10:18:13,081 DEBUG [Thread-1319 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:13,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:13,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:13,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:13,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:13,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:13,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:13,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/bc076e49b22448efa88c0b5f275966f9 is 50, key is test_row_0/A:col10/1733480290956/Put/seqid=0 2024-12-06T10:18:13,087 DEBUG [Thread-1311 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:61610 2024-12-06T10:18:13,087 DEBUG [Thread-1311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:13,088 DEBUG [Thread-1317 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:61610 2024-12-06T10:18:13,088 DEBUG [Thread-1317 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:13,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742220_1396 (size=12301) 2024-12-06T10:18:13,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/bc076e49b22448efa88c0b5f275966f9 2024-12-06T10:18:13,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/20a31f12d9a14002865f3fa020c62350 is 50, key is test_row_0/B:col10/1733480290956/Put/seqid=0 2024-12-06T10:18:13,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742221_1397 (size=12301) 2024-12-06T10:18:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T10:18:13,687 INFO 
[Thread-1321 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-06T10:18:13,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/20a31f12d9a14002865f3fa020c62350 2024-12-06T10:18:13,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cb1e99645b4c47f79228273e6c444fca is 50, key is test_row_0/C:col10/1733480290956/Put/seqid=0 2024-12-06T10:18:13,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742222_1398 (size=12301) 2024-12-06T10:18:14,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cb1e99645b4c47f79228273e6c444fca 2024-12-06T10:18:14,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/bc076e49b22448efa88c0b5f275966f9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bc076e49b22448efa88c0b5f275966f9 2024-12-06T10:18:14,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bc076e49b22448efa88c0b5f275966f9, entries=150, sequenceid=491, filesize=12.0 K 2024-12-06T10:18:14,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/20a31f12d9a14002865f3fa020c62350 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/20a31f12d9a14002865f3fa020c62350 2024-12-06T10:18:14,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/20a31f12d9a14002865f3fa020c62350, entries=150, sequenceid=491, filesize=12.0 K 2024-12-06T10:18:14,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/cb1e99645b4c47f79228273e6c444fca as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cb1e99645b4c47f79228273e6c444fca 2024-12-06T10:18:14,324 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cb1e99645b4c47f79228273e6c444fca, entries=150, sequenceid=491, filesize=12.0 K 2024-12-06T10:18:14,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=13.42 KB/13740 for d2fcaf53b78370d16571065501f9880b in 1244ms, sequenceid=491, compaction requested=true 2024-12-06T10:18:14,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:14,326 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:14,326 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2fcaf53b78370d16571065501f9880b:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:14,326 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47850 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50290 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/B is initiating minor compaction (all files) 2024-12-06T10:18:14,327 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/B in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
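The "-shortCompactions-" / "-longCompactions-" worker names in the entries above come from the region server's two compaction thread pools; their sizes, like the memstore flush threshold, are ordinary configuration keys. A small sketch for orientation (values shown are the usual defaults, not necessarily what this test run used):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a region flush is normally triggered (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Sizes of the two compaction pools that appear in the log as
    // "-shortCompactions-" and "-longCompactions-" worker threads (default 1 each).
    conf.setInt("hbase.regionserver.thread.compaction.small", 1);
    conf.setInt("hbase.regionserver.thread.compaction.large", 1);
    // Ratio used by the size-based file selection shown in the entries above (default 1.2).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println(conf.get("hbase.regionserver.thread.compaction.small"));
  }
}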
2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/A is initiating minor compaction (all files) 2024-12-06T10:18:14,327 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/A in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:14,327 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4f606fd6699d4154b063b5a9d6f3dc20, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/53f03797a9b84f858aa8d9d881ebaea5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/f0e35ee7e5db424c8500f70d5267d777, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/20a31f12d9a14002865f3fa020c62350] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=46.7 K 2024-12-06T10:18:14,327 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8cac81a5fbff41ceba93bdae284d45ca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/21f9f8e2aa9547b7a1381c695c063d2c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9223034b48e5469db4f66b5f8619c93c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bc076e49b22448efa88c0b5f275966f9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=49.1 K 2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f606fd6699d4154b063b5a9d6f3dc20, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733480289532 2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cac81a5fbff41ceba93bdae284d45ca, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733480289532 2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 53f03797a9b84f858aa8d9d881ebaea5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733480289644 2024-12-06T10:18:14,327 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21f9f8e2aa9547b7a1381c695c063d2c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=451, 
earliestPutTs=1733480289644 2024-12-06T10:18:14,328 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f0e35ee7e5db424c8500f70d5267d777, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=464, earliestPutTs=1733480289794 2024-12-06T10:18:14,328 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9223034b48e5469db4f66b5f8619c93c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=464, earliestPutTs=1733480289794 2024-12-06T10:18:14,328 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc076e49b22448efa88c0b5f275966f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1733480290952 2024-12-06T10:18:14,328 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 20a31f12d9a14002865f3fa020c62350, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1733480290952 2024-12-06T10:18:14,335 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#A#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:14,335 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/5e6a5622f6214d8c840411d3c0a48e16 is 50, key is test_row_0/A:col10/1733480290956/Put/seqid=0 2024-12-06T10:18:14,335 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#B#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:14,336 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/ba50db72a428495c994eec67efa60043 is 50, key is test_row_0/B:col10/1733480290956/Put/seqid=0 2024-12-06T10:18:14,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742223_1399 (size=13527) 2024-12-06T10:18:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742224_1400 (size=13527) 2024-12-06T10:18:14,743 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/ba50db72a428495c994eec67efa60043 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/ba50db72a428495c994eec67efa60043 2024-12-06T10:18:14,743 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/5e6a5622f6214d8c840411d3c0a48e16 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/5e6a5622f6214d8c840411d3c0a48e16 2024-12-06T10:18:14,746 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/B of d2fcaf53b78370d16571065501f9880b into ba50db72a428495c994eec67efa60043(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:14,746 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/A of d2fcaf53b78370d16571065501f9880b into 5e6a5622f6214d8c840411d3c0a48e16(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
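The "Exploring compaction algorithm has selected 4 files ..." entries above reflect HBase's size-ratio selection: a candidate set is only acceptable if no single file dwarfs the rest. A simplified, illustrative check of that ratio rule (not the actual ExploringCompactionPolicy code; the method and variable names are made up for the sketch):

import java.util.List;

public class RatioCheckSketch {
  // Simplified stand-in for the "files in ratio" test used by HBase's size-based
  // compaction policies: a candidate set is acceptable only if no file is larger
  // than `ratio` times the combined size of the other candidates. The real
  // ExploringCompactionPolicy additionally weighs file count and total size
  // across the candidate permutations mentioned in the log above.
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly matching the four A-store files above (13.1 K, 14.4 K, 9.6 K, 12.0 K).
    System.out.println(withinRatio(List.of(13414L, 14745L, 9830L, 12288L), 1.2));
  }
}

With the default ratio of 1.2, four files of these rough sizes pass the check, which is consistent with all four store files being compacted together above.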
2024-12-06T10:18:14,746 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:14,746 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:14,747 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/A, priority=12, startTime=1733480294325; duration=0sec 2024-12-06T10:18:14,747 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/B, priority=12, startTime=1733480294326; duration=0sec 2024-12-06T10:18:14,747 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:14,747 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:A 2024-12-06T10:18:14,747 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:14,747 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:B 2024-12-06T10:18:14,747 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:14,748 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47850 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:14,748 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): d2fcaf53b78370d16571065501f9880b/C is initiating minor compaction (all files) 2024-12-06T10:18:14,748 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2fcaf53b78370d16571065501f9880b/C in TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
2024-12-06T10:18:14,748 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2664288a38934ad29c92a1ebea03f2dd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e483ae395c87449a84d7c97a44cb6ca1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/60060ceed41d4f0d9a326be7dd2dba96, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cb1e99645b4c47f79228273e6c444fca] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp, totalSize=46.7 K 2024-12-06T10:18:14,748 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2664288a38934ad29c92a1ebea03f2dd, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733480289532 2024-12-06T10:18:14,748 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e483ae395c87449a84d7c97a44cb6ca1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733480289644 2024-12-06T10:18:14,749 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60060ceed41d4f0d9a326be7dd2dba96, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=464, earliestPutTs=1733480289794 2024-12-06T10:18:14,749 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb1e99645b4c47f79228273e6c444fca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1733480290952 2024-12-06T10:18:14,755 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2fcaf53b78370d16571065501f9880b#C#compaction#349 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:14,755 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/9209c2e24eff4bc9808a1a9604f7ed3c is 50, key is test_row_0/C:col10/1733480290956/Put/seqid=0 2024-12-06T10:18:14,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742225_1401 (size=13527) 2024-12-06T10:18:15,162 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/9209c2e24eff4bc9808a1a9604f7ed3c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/9209c2e24eff4bc9808a1a9604f7ed3c 2024-12-06T10:18:15,165 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2fcaf53b78370d16571065501f9880b/C of d2fcaf53b78370d16571065501f9880b into 9209c2e24eff4bc9808a1a9604f7ed3c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:15,165 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:15,165 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b., storeName=d2fcaf53b78370d16571065501f9880b/C, priority=12, startTime=1733480294326; duration=0sec 2024-12-06T10:18:15,166 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:15,166 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2fcaf53b78370d16571065501f9880b:C 2024-12-06T10:18:21,018 DEBUG [Thread-1313 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:61610 2024-12-06T10:18:21,018 DEBUG [Thread-1313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:21,058 DEBUG [Thread-1315 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:61610 2024-12-06T10:18:21,059 DEBUG [Thread-1315 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
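"Finished test." above is followed by the per-thread tallies (wrote/read counts) below. In spirit, each writer updates all three column families (A, B, C, the same stores flushed and compacted throughout this log) of a row in one atomic Put, and each reader asserts that every family of a row reports the same value; a hedged sketch of that pattern (family, qualifier and method names are illustrative, not the tool's actual code):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowSketch {
  static final byte[] QUALIFIER = Bytes.toBytes("col10");
  static final byte[][] FAMILIES = { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };

  // Writer side: one Put that covers all three families is applied atomically per row.
  static void writeRow(Table table, byte[] row, byte[] value) throws IOException {
    Put put = new Put(row);
    for (byte[] family : FAMILIES) {
      put.addColumn(family, QUALIFIER, value);
    }
    table.put(put);
  }

  // Reader side: row-level atomicity means every family must report the same value.
  static void checkRow(Table table, byte[] row) throws IOException {
    Result result = table.get(new Get(row));
    byte[] first = result.getValue(FAMILIES[0], QUALIFIER);
    for (byte[] family : FAMILIES) {
      if (!Bytes.equals(first, result.getValue(family, QUALIFIER))) {
        throw new AssertionError("Inconsistent row " + Bytes.toString(row));
      }
    }
  }
}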
Writers:
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 339
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 8
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 8
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5941
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6026
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5871
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5950
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6026
2024-12-06T10:18:21,059 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-06T10:18:21,059 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-06T10:18:21,059 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58341641 to 127.0.0.1:61610
2024-12-06T10:18:21,059 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:18:21,060 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-06T10:18:21,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-06T10:18:21,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-06T10:18:21,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93
2024-12-06T10:18:21,062 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480301062"}]},"ts":"1733480301062"}
2024-12-06T10:18:21,063 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-06T10:18:21,065 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-06T10:18:21,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-06T10:18:21,067 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, UNASSIGN}]
2024-12-06T10:18:21,067 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, UNASSIGN
2024-12-06T10:18:21,068 INFO [PEWorker-3 {}]
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=d2fcaf53b78370d16571065501f9880b, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:21,069 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:18:21,069 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:18:21,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T10:18:21,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:21,220 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:21,220 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:18:21,220 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing d2fcaf53b78370d16571065501f9880b, disabling compactions & flushes 2024-12-06T10:18:21,221 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. after waiting 0 ms 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
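The disable/unassign chain above (DisableTableProcedure pid=93 → CloseTableRegionsProcedure pid=94 → TransitRegionStateProcedure pid=95 → CloseRegionProcedure pid=96) was started by the client's "Started disable of TestAcidGuarantees" call. From application code the same teardown goes through the Admin API; a minimal sketch, assuming a standard client classpath (class name and the follow-up delete comment are illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a DisableTableProcedure on the master (pid=93 above) and waits for
      // it; the master then unassigns and closes each region, as pid=94..96 show.
      admin.disableTable(table);
      System.out.println("disabled: " + admin.isTableDisabled(table));
      // Teardown would typically continue with admin.deleteTable(table).
    }
  }
}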
2024-12-06T10:18:21,221 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing d2fcaf53b78370d16571065501f9880b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=A 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=B 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2fcaf53b78370d16571065501f9880b, store=C 2024-12-06T10:18:21,221 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:21,225 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/eaad11c5f27a433aa468f5d5cd965bf8 is 50, key is test_row_2/A:col10/1733480301057/Put/seqid=0 2024-12-06T10:18:21,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742226_1402 (size=7415) 2024-12-06T10:18:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T10:18:21,628 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=501 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/eaad11c5f27a433aa468f5d5cd965bf8 2024-12-06T10:18:21,635 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/8354396e834648c59ac6a43c23bb905c is 50, key is test_row_2/B:col10/1733480301057/Put/seqid=0 2024-12-06T10:18:21,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742227_1403 (size=7415) 2024-12-06T10:18:21,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T10:18:22,038 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=501 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/8354396e834648c59ac6a43c23bb905c 2024-12-06T10:18:22,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/596bb346247848dc9f3b32e324cf549d is 50, key is test_row_2/C:col10/1733480301057/Put/seqid=0 2024-12-06T10:18:22,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742228_1404 (size=7415) 2024-12-06T10:18:22,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T10:18:22,448 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=501 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/596bb346247848dc9f3b32e324cf549d 2024-12-06T10:18:22,452 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/A/eaad11c5f27a433aa468f5d5cd965bf8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/eaad11c5f27a433aa468f5d5cd965bf8 2024-12-06T10:18:22,455 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/eaad11c5f27a433aa468f5d5cd965bf8, entries=50, sequenceid=501, filesize=7.2 K 2024-12-06T10:18:22,455 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/B/8354396e834648c59ac6a43c23bb905c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8354396e834648c59ac6a43c23bb905c 2024-12-06T10:18:22,458 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8354396e834648c59ac6a43c23bb905c, entries=50, sequenceid=501, filesize=7.2 K 2024-12-06T10:18:22,459 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/.tmp/C/596bb346247848dc9f3b32e324cf549d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/596bb346247848dc9f3b32e324cf549d 2024-12-06T10:18:22,462 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/596bb346247848dc9f3b32e324cf549d, entries=50, sequenceid=501, filesize=7.2 K 2024-12-06T10:18:22,462 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for d2fcaf53b78370d16571065501f9880b in 1241ms, sequenceid=501, compaction requested=false 2024-12-06T10:18:22,463 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2d68d28dbd6949318fca792f0e19ddca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/467650f799504aef96acc78c92d527f1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7888a79dcea74802ba0695d1927ced71, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/432a82c592324f94a7d2eebdc64c0219, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/cd049b8bd4094a66a8d1c2d57253689a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/44cd6c6e89f64563828b045fcd9425cf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/dfd6cedd381945bcae195365d803c1e3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/331f12ea77284796bb3106fbea9bc829, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9cee9cb6e20547d1bd5886525467e9bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bdb9067db6b44ebe9fa2eafde6e3c90c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8035ae33d05d4fd68a60dedcafec4c7a, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/99a841503b9745c2941195cc9a9b4f04, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/34575bd6e68d4110b6db3609b95b9a4d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7147eae646c141df84461f420ee19fe3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/3e65179b747744dc90dcb388a7447dbc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2128398bfd284391b81a2032e9e90043, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/de4b86fbb53f4682af7f0264e4661050, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/81a993ae8ff6493d990e41ceaf280351, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8ce1a53d35d241bfb2066fa64ad2aca2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/57a2481bb78a4472a5b317cdd61b95aa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/db595ed82f7840618906dbbb66072c58, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/1a383231d6904ffe8d9e0ac10941cbc2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60d3e6f72a64444c99021150c8ebb73e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/a852214f13cd42d3b8bdd32a4f7cb17c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60ed4899d70347069f82821b5d63c3ed, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/acd0796d1bef4b589c1ed0fa9655fd07, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/70d3db0edaa54f8ea48e1e023cdd2fd0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/67d54b9fa4d84b898ba13044e0c7a876, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/71a531f0196b473b9f8d37233720953e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c83e5815ee9b41669bbd71e57f0ed4a5, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/633b4122011a41f1a2c812d1a78f8cfb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8cac81a5fbff41ceba93bdae284d45ca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/21f9f8e2aa9547b7a1381c695c063d2c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9223034b48e5469db4f66b5f8619c93c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bc076e49b22448efa88c0b5f275966f9] to archive 2024-12-06T10:18:22,464 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:18:22,465 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2d68d28dbd6949318fca792f0e19ddca to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2d68d28dbd6949318fca792f0e19ddca 2024-12-06T10:18:22,466 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/467650f799504aef96acc78c92d527f1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/467650f799504aef96acc78c92d527f1 2024-12-06T10:18:22,467 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7888a79dcea74802ba0695d1927ced71 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7888a79dcea74802ba0695d1927ced71 2024-12-06T10:18:22,468 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/432a82c592324f94a7d2eebdc64c0219 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/432a82c592324f94a7d2eebdc64c0219 2024-12-06T10:18:22,469 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/cd049b8bd4094a66a8d1c2d57253689a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/cd049b8bd4094a66a8d1c2d57253689a 2024-12-06T10:18:22,470 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c3b2e21d0a2d4c6ca14dcfa29dd7a93c 2024-12-06T10:18:22,471 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/44cd6c6e89f64563828b045fcd9425cf to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/44cd6c6e89f64563828b045fcd9425cf 2024-12-06T10:18:22,471 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/dfd6cedd381945bcae195365d803c1e3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/dfd6cedd381945bcae195365d803c1e3 2024-12-06T10:18:22,472 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/331f12ea77284796bb3106fbea9bc829 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/331f12ea77284796bb3106fbea9bc829 2024-12-06T10:18:22,473 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9cee9cb6e20547d1bd5886525467e9bb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9cee9cb6e20547d1bd5886525467e9bb 2024-12-06T10:18:22,474 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bdb9067db6b44ebe9fa2eafde6e3c90c to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bdb9067db6b44ebe9fa2eafde6e3c90c 2024-12-06T10:18:22,475 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8035ae33d05d4fd68a60dedcafec4c7a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8035ae33d05d4fd68a60dedcafec4c7a 2024-12-06T10:18:22,476 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/99a841503b9745c2941195cc9a9b4f04 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/99a841503b9745c2941195cc9a9b4f04 2024-12-06T10:18:22,477 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/34575bd6e68d4110b6db3609b95b9a4d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/34575bd6e68d4110b6db3609b95b9a4d 2024-12-06T10:18:22,478 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7147eae646c141df84461f420ee19fe3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/7147eae646c141df84461f420ee19fe3 2024-12-06T10:18:22,478 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/3e65179b747744dc90dcb388a7447dbc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/3e65179b747744dc90dcb388a7447dbc 2024-12-06T10:18:22,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2128398bfd284391b81a2032e9e90043 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/2128398bfd284391b81a2032e9e90043 2024-12-06T10:18:22,480 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/de4b86fbb53f4682af7f0264e4661050 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/de4b86fbb53f4682af7f0264e4661050 2024-12-06T10:18:22,481 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/81a993ae8ff6493d990e41ceaf280351 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/81a993ae8ff6493d990e41ceaf280351 2024-12-06T10:18:22,482 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8ce1a53d35d241bfb2066fa64ad2aca2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8ce1a53d35d241bfb2066fa64ad2aca2 2024-12-06T10:18:22,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/57a2481bb78a4472a5b317cdd61b95aa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/57a2481bb78a4472a5b317cdd61b95aa 2024-12-06T10:18:22,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/db595ed82f7840618906dbbb66072c58 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/db595ed82f7840618906dbbb66072c58 2024-12-06T10:18:22,484 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/1a383231d6904ffe8d9e0ac10941cbc2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/1a383231d6904ffe8d9e0ac10941cbc2 2024-12-06T10:18:22,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60d3e6f72a64444c99021150c8ebb73e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60d3e6f72a64444c99021150c8ebb73e 2024-12-06T10:18:22,486 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/a852214f13cd42d3b8bdd32a4f7cb17c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/a852214f13cd42d3b8bdd32a4f7cb17c 2024-12-06T10:18:22,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60ed4899d70347069f82821b5d63c3ed to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/60ed4899d70347069f82821b5d63c3ed 2024-12-06T10:18:22,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/acd0796d1bef4b589c1ed0fa9655fd07 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/acd0796d1bef4b589c1ed0fa9655fd07 2024-12-06T10:18:22,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/70d3db0edaa54f8ea48e1e023cdd2fd0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/70d3db0edaa54f8ea48e1e023cdd2fd0 2024-12-06T10:18:22,489 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/67d54b9fa4d84b898ba13044e0c7a876 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/67d54b9fa4d84b898ba13044e0c7a876 2024-12-06T10:18:22,490 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/71a531f0196b473b9f8d37233720953e to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/71a531f0196b473b9f8d37233720953e 2024-12-06T10:18:22,491 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c83e5815ee9b41669bbd71e57f0ed4a5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/c83e5815ee9b41669bbd71e57f0ed4a5 2024-12-06T10:18:22,492 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/633b4122011a41f1a2c812d1a78f8cfb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/633b4122011a41f1a2c812d1a78f8cfb 2024-12-06T10:18:22,493 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8cac81a5fbff41ceba93bdae284d45ca to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/8cac81a5fbff41ceba93bdae284d45ca 2024-12-06T10:18:22,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/21f9f8e2aa9547b7a1381c695c063d2c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/21f9f8e2aa9547b7a1381c695c063d2c 2024-12-06T10:18:22,495 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9223034b48e5469db4f66b5f8619c93c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/9223034b48e5469db4f66b5f8619c93c 2024-12-06T10:18:22,495 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bc076e49b22448efa88c0b5f275966f9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/bc076e49b22448efa88c0b5f275966f9 2024-12-06T10:18:22,497 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/0b25f74849814bc48784f8fdd481131b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c9a39067726d44c696cab789b22d20b7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/26e43629ebf948a6933e35eb5369c903, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/91120bcc98a244bda205ba8c69b31bb3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3d7dd87f16d64d648a9111481768cde6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/af9cf15d7d254c7b9f9921975582767a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/18ecfc49c81c40ca96e419db0363d45d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5a7b4e42b4624c80bdf6acb82a09a704, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/aad0cf12251a4f1a883da2908a602e46, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/e95257d7b5ed4d52a6a1f6ebacfd2c87, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/847103391737481fbbd3bd0c748725bd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/04b390015ada4d3693b5162993a09ada, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3b8ddd5defd947e7a183562979589006, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/a5aa60f0a5074786b5117c290b9c10b7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/187af28b04b8491d893ad3590ef1c49c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3e97c830b57c4ba7ba93120998403622, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/327fb56ddee142178ae19135e5a3c1f6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4fe7587ef4314f42af2af78903edd40c, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5424735984eb44cb90cd6ea439e0c6a2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8d70e459b4cd4fa0811fb839ca73eceb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/73b70a4a1a794274bfa1f5c897df123a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/cbb1c0517fb2402f89b8234250228e69, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/143ebfc7f0714c23ba767d1891bc8467, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c32ae520826f4cc9978467326ab0f4c2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4401bacda07a4a0ead744c896fa74916, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/da3fab07135140f9a76d3c36edda0c55, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/60b12ed22a2e45beac5599b864b46952, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/bb2a8d95a9a94c60b49d27fd1607c5b0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/74bc2abc9c2c48e288e8bd7c83c67ea8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/77235a7fd99b4403b49ae521e305cb1e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4f606fd6699d4154b063b5a9d6f3dc20, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c05843223b8749e6bad1b4dfce007a30, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/53f03797a9b84f858aa8d9d881ebaea5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/f0e35ee7e5db424c8500f70d5267d777, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/20a31f12d9a14002865f3fa020c62350] to archive 2024-12-06T10:18:22,497 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
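Note on the records above and below: the store closer first logs one "Moving the files [...] to archive" record that enumerates the batch of compacted B-family store files, and the archiver then logs one "Archived from FileableStoreFile, <src> to <dst>" record per file, where <dst> is the same relative path mirrored under the archive/ directory. The following is only a minimal sketch of that path mapping, written against the plain Hadoop FileSystem API rather than HBase's own HFileArchiver; the root directory, class name, and helper name are illustrative, not taken from HBase source.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {

  // Move each compacted store file from <root>/data/... to the mirrored <root>/archive/data/... path,
  // matching the src -> dst pairs visible in the "Archived from FileableStoreFile" records.
  static void archiveStoreFiles(FileSystem fs, Path rootDir, List<Path> storeFiles) throws IOException {
    for (Path src : storeFiles) {
      // e.g. data/default/TestAcidGuarantees/<region>/B/<hfile>, relative to the root dir
      String relative = src.toString().substring(rootDir.toString().length() + 1);
      Path dst = new Path(new Path(rootDir, "archive"), relative);
      fs.mkdirs(dst.getParent());                 // ensure archive/<...>/<family> exists
      if (!fs.rename(src, dst)) {
        throw new IOException("Failed to archive " + src + " to " + dst);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // hypothetical local root directory standing in for the test-data dir in the log
    Path rootDir = new Path("file:///tmp/example-root");
    FileSystem fs = rootDir.getFileSystem(conf);
    Path src = new Path(rootDir, "data/default/TestAcidGuarantees/exampleregion/B/examplehfile");
    fs.mkdirs(src.getParent());
    fs.createNewFile(src);                        // fake store file so the rename has something to move
    archiveStoreFiles(fs, rootDir, List.of(src));
  }
}

The files are moved rather than deleted because HBase keeps removed store files under archive/ for a cleaner chore (or a snapshot that still references them) to deal with later; the sketch reproduces only the move itself.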
2024-12-06T10:18:22,498 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/0b25f74849814bc48784f8fdd481131b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/0b25f74849814bc48784f8fdd481131b 2024-12-06T10:18:22,499 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c9a39067726d44c696cab789b22d20b7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c9a39067726d44c696cab789b22d20b7 2024-12-06T10:18:22,500 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/26e43629ebf948a6933e35eb5369c903 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/26e43629ebf948a6933e35eb5369c903 2024-12-06T10:18:22,501 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/91120bcc98a244bda205ba8c69b31bb3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/91120bcc98a244bda205ba8c69b31bb3 2024-12-06T10:18:22,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3d7dd87f16d64d648a9111481768cde6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3d7dd87f16d64d648a9111481768cde6 2024-12-06T10:18:22,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/af9cf15d7d254c7b9f9921975582767a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/af9cf15d7d254c7b9f9921975582767a 2024-12-06T10:18:22,503 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/18ecfc49c81c40ca96e419db0363d45d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/18ecfc49c81c40ca96e419db0363d45d 2024-12-06T10:18:22,504 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5a7b4e42b4624c80bdf6acb82a09a704 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5a7b4e42b4624c80bdf6acb82a09a704 2024-12-06T10:18:22,505 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/aad0cf12251a4f1a883da2908a602e46 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/aad0cf12251a4f1a883da2908a602e46 2024-12-06T10:18:22,506 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/e95257d7b5ed4d52a6a1f6ebacfd2c87 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/e95257d7b5ed4d52a6a1f6ebacfd2c87 2024-12-06T10:18:22,506 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/847103391737481fbbd3bd0c748725bd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/847103391737481fbbd3bd0c748725bd 2024-12-06T10:18:22,507 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/04b390015ada4d3693b5162993a09ada to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/04b390015ada4d3693b5162993a09ada 2024-12-06T10:18:22,508 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3b8ddd5defd947e7a183562979589006 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3b8ddd5defd947e7a183562979589006 2024-12-06T10:18:22,509 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/a5aa60f0a5074786b5117c290b9c10b7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/a5aa60f0a5074786b5117c290b9c10b7 2024-12-06T10:18:22,510 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/187af28b04b8491d893ad3590ef1c49c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/187af28b04b8491d893ad3590ef1c49c 2024-12-06T10:18:22,511 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3e97c830b57c4ba7ba93120998403622 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/3e97c830b57c4ba7ba93120998403622 2024-12-06T10:18:22,512 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/327fb56ddee142178ae19135e5a3c1f6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/327fb56ddee142178ae19135e5a3c1f6 2024-12-06T10:18:22,513 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4fe7587ef4314f42af2af78903edd40c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4fe7587ef4314f42af2af78903edd40c 2024-12-06T10:18:22,513 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5424735984eb44cb90cd6ea439e0c6a2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/5424735984eb44cb90cd6ea439e0c6a2 2024-12-06T10:18:22,514 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8d70e459b4cd4fa0811fb839ca73eceb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8d70e459b4cd4fa0811fb839ca73eceb 2024-12-06T10:18:22,515 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/73b70a4a1a794274bfa1f5c897df123a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/73b70a4a1a794274bfa1f5c897df123a 2024-12-06T10:18:22,516 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/cbb1c0517fb2402f89b8234250228e69 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/cbb1c0517fb2402f89b8234250228e69 2024-12-06T10:18:22,517 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/143ebfc7f0714c23ba767d1891bc8467 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/143ebfc7f0714c23ba767d1891bc8467 2024-12-06T10:18:22,517 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c32ae520826f4cc9978467326ab0f4c2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c32ae520826f4cc9978467326ab0f4c2 2024-12-06T10:18:22,518 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4401bacda07a4a0ead744c896fa74916 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4401bacda07a4a0ead744c896fa74916 2024-12-06T10:18:22,519 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/da3fab07135140f9a76d3c36edda0c55 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/da3fab07135140f9a76d3c36edda0c55 2024-12-06T10:18:22,520 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/60b12ed22a2e45beac5599b864b46952 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/60b12ed22a2e45beac5599b864b46952 2024-12-06T10:18:22,521 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/b1f1baf2f4a44cb080e1ab0fdfe8fcbc 2024-12-06T10:18:22,522 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/bb2a8d95a9a94c60b49d27fd1607c5b0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/bb2a8d95a9a94c60b49d27fd1607c5b0 2024-12-06T10:18:22,522 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/74bc2abc9c2c48e288e8bd7c83c67ea8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/74bc2abc9c2c48e288e8bd7c83c67ea8 2024-12-06T10:18:22,523 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/77235a7fd99b4403b49ae521e305cb1e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/77235a7fd99b4403b49ae521e305cb1e 2024-12-06T10:18:22,524 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4f606fd6699d4154b063b5a9d6f3dc20 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/4f606fd6699d4154b063b5a9d6f3dc20 2024-12-06T10:18:22,525 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c05843223b8749e6bad1b4dfce007a30 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/c05843223b8749e6bad1b4dfce007a30 2024-12-06T10:18:22,525 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/53f03797a9b84f858aa8d9d881ebaea5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/53f03797a9b84f858aa8d9d881ebaea5 2024-12-06T10:18:22,526 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/f0e35ee7e5db424c8500f70d5267d777 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/f0e35ee7e5db424c8500f70d5267d777 2024-12-06T10:18:22,527 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/20a31f12d9a14002865f3fa020c62350 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/20a31f12d9a14002865f3fa020c62350 2024-12-06T10:18:22,528 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/04d5bcc116024343a214486e6cd1ab35, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/818d811852df485ea360cb1a7a946468, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b44832bc3bcb4bf4ab827c5f0680e417, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6a32c6c29a1d403393220e0da06cbf6b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/da2c34349ff34d38beb0dafdd07f2b85, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3051800afebd48828f0fe8ecbd15d8f5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/8e169e7db6724c1caed2c645d64d167d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/684f37f50206416d8695f8fad7b244a1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e8ac6f4366f14f04985f0c14846b1dea, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/7370127546684b45a4b2cfa0f185ceaf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b09f15d14aca40798728530c024c027b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/37e1a8ecf9de4777a3dc449120a3c8f4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6199bad2dd7848b38177793044a7012a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cf48dccda030418ca9ea5b03d7f9fd26, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/94ec74b5c888409b9c9e97280f18e6a6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d5364d6984d042c693510973f2847583, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/57dba0f12fff467f8299e8bee8b693ac, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/a9ffef46b15745dc9f224adfa976051d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/64978c8a6c57499d9dc6a8c489558260, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3956c661bff84474b73ce38db2ac27d8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/fdc2cd6c21da45fd8cba709c70074e47, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/47480a6b8dd3418dae645bcd7f339632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5fd189542edb4c67a468a1578c72128c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/42edaba8a4f94ce9a79c89ff97eea1f5, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cc387f5eadde4d1680e53b7d6698e002, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5a6cbb35aadc483abdca1eebeed2e6ac, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/865d89c489074c8db6d6c3824a21fc1f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/4f25066c47774849b1c4a292b3f1272c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2231299032634c46a01c033e9412438e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d7aaaf0378814f2d92130d4ccc41b866, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/895dc08dbab44673ba46b783956ba7a5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2664288a38934ad29c92a1ebea03f2dd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b7fe98bfe5c14338ae9fe1af1726d224, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e483ae395c87449a84d7c97a44cb6ca1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/60060ceed41d4f0d9a326be7dd2dba96, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cb1e99645b4c47f79228273e6c444fca] to archive 2024-12-06T10:18:22,529 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
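The same pattern repeats for the C family below: one batched "Moving the files [...] to archive" record, then one "Archived from FileableStoreFile, <src> to <dst>" record per file. As a hedged illustration of consuming these records when checking that every file in a batch was actually archived, a tiny parser might look like the following; the regular expression is derived only from the log lines shown here, not from any documented HBase log format, and the sample line is hypothetical.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ArchivedLineParser {

  // Captures the source and destination paths from an "Archived from FileableStoreFile" record.
  private static final Pattern ARCHIVED =
      Pattern.compile("Archived from FileableStoreFile, (\\S+) to (\\S+)");

  public static void main(String[] args) {
    String line = "DEBUG [StoreCloser] backup.HFileArchiver(596): Archived from FileableStoreFile, "
        + "hdfs://namenode:8020/hbase/data/default/TestAcidGuarantees/exampleregion/C/examplehfile to "
        + "hdfs://namenode:8020/hbase/archive/data/default/TestAcidGuarantees/exampleregion/C/examplehfile";
    Matcher m = ARCHIVED.matcher(line);
    if (m.find()) {
      System.out.println("src = " + m.group(1));
      System.out.println("dst = " + m.group(2));
    }
  }
}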
2024-12-06T10:18:22,530 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/04d5bcc116024343a214486e6cd1ab35 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/04d5bcc116024343a214486e6cd1ab35 2024-12-06T10:18:22,531 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/818d811852df485ea360cb1a7a946468 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/818d811852df485ea360cb1a7a946468 2024-12-06T10:18:22,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b44832bc3bcb4bf4ab827c5f0680e417 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b44832bc3bcb4bf4ab827c5f0680e417 2024-12-06T10:18:22,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6a32c6c29a1d403393220e0da06cbf6b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6a32c6c29a1d403393220e0da06cbf6b 2024-12-06T10:18:22,533 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/da2c34349ff34d38beb0dafdd07f2b85 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/da2c34349ff34d38beb0dafdd07f2b85 2024-12-06T10:18:22,534 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3051800afebd48828f0fe8ecbd15d8f5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3051800afebd48828f0fe8ecbd15d8f5 2024-12-06T10:18:22,535 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/8e169e7db6724c1caed2c645d64d167d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/8e169e7db6724c1caed2c645d64d167d 2024-12-06T10:18:22,536 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/684f37f50206416d8695f8fad7b244a1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/684f37f50206416d8695f8fad7b244a1 2024-12-06T10:18:22,536 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e8ac6f4366f14f04985f0c14846b1dea to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e8ac6f4366f14f04985f0c14846b1dea 2024-12-06T10:18:22,537 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/7370127546684b45a4b2cfa0f185ceaf to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/7370127546684b45a4b2cfa0f185ceaf 2024-12-06T10:18:22,538 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b09f15d14aca40798728530c024c027b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b09f15d14aca40798728530c024c027b 2024-12-06T10:18:22,539 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/37e1a8ecf9de4777a3dc449120a3c8f4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/37e1a8ecf9de4777a3dc449120a3c8f4 2024-12-06T10:18:22,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6199bad2dd7848b38177793044a7012a to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/6199bad2dd7848b38177793044a7012a 2024-12-06T10:18:22,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cf48dccda030418ca9ea5b03d7f9fd26 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cf48dccda030418ca9ea5b03d7f9fd26 2024-12-06T10:18:22,541 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/94ec74b5c888409b9c9e97280f18e6a6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/94ec74b5c888409b9c9e97280f18e6a6 2024-12-06T10:18:22,542 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d5364d6984d042c693510973f2847583 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d5364d6984d042c693510973f2847583 2024-12-06T10:18:22,543 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/57dba0f12fff467f8299e8bee8b693ac to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/57dba0f12fff467f8299e8bee8b693ac 2024-12-06T10:18:22,543 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/a9ffef46b15745dc9f224adfa976051d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/a9ffef46b15745dc9f224adfa976051d 2024-12-06T10:18:22,544 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/64978c8a6c57499d9dc6a8c489558260 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/64978c8a6c57499d9dc6a8c489558260 2024-12-06T10:18:22,545 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3956c661bff84474b73ce38db2ac27d8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/3956c661bff84474b73ce38db2ac27d8 2024-12-06T10:18:22,546 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/fdc2cd6c21da45fd8cba709c70074e47 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/fdc2cd6c21da45fd8cba709c70074e47 2024-12-06T10:18:22,546 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/47480a6b8dd3418dae645bcd7f339632 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/47480a6b8dd3418dae645bcd7f339632 2024-12-06T10:18:22,547 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5fd189542edb4c67a468a1578c72128c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5fd189542edb4c67a468a1578c72128c 2024-12-06T10:18:22,548 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/42edaba8a4f94ce9a79c89ff97eea1f5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/42edaba8a4f94ce9a79c89ff97eea1f5 2024-12-06T10:18:22,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cc387f5eadde4d1680e53b7d6698e002 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cc387f5eadde4d1680e53b7d6698e002 2024-12-06T10:18:22,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5a6cbb35aadc483abdca1eebeed2e6ac to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/5a6cbb35aadc483abdca1eebeed2e6ac 2024-12-06T10:18:22,550 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/865d89c489074c8db6d6c3824a21fc1f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/865d89c489074c8db6d6c3824a21fc1f 2024-12-06T10:18:22,551 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/4f25066c47774849b1c4a292b3f1272c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/4f25066c47774849b1c4a292b3f1272c 2024-12-06T10:18:22,551 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2231299032634c46a01c033e9412438e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2231299032634c46a01c033e9412438e 2024-12-06T10:18:22,552 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d7aaaf0378814f2d92130d4ccc41b866 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/d7aaaf0378814f2d92130d4ccc41b866 2024-12-06T10:18:22,553 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/895dc08dbab44673ba46b783956ba7a5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/895dc08dbab44673ba46b783956ba7a5 2024-12-06T10:18:22,554 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2664288a38934ad29c92a1ebea03f2dd to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/2664288a38934ad29c92a1ebea03f2dd 2024-12-06T10:18:22,554 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b7fe98bfe5c14338ae9fe1af1726d224 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/b7fe98bfe5c14338ae9fe1af1726d224 2024-12-06T10:18:22,555 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e483ae395c87449a84d7c97a44cb6ca1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/e483ae395c87449a84d7c97a44cb6ca1 2024-12-06T10:18:22,556 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/60060ceed41d4f0d9a326be7dd2dba96 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/60060ceed41d4f0d9a326be7dd2dba96 2024-12-06T10:18:22,557 DEBUG [StoreCloser-TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cb1e99645b4c47f79228273e6c444fca to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/cb1e99645b4c47f79228273e6c444fca 2024-12-06T10:18:22,561 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/recovered.edits/504.seqid, newMaxSeqId=504, maxSeqId=1 2024-12-06T10:18:22,561 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b. 
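The HFileArchiver entries above all follow one pattern: a store file under .../data/<namespace>/<table>/<encoded-region>/<family>/ is moved to the same relative location under .../archive/data/. A minimal sketch of that path mapping, assuming Hadoop's Path API; the class and helper name are illustrative only, not HBase internals.

```java
import org.apache.hadoop.fs.Path;

// Illustrative only: rebuilds the archive destination that HFileArchiver logs above
// by mirroring the data-root layout under archive/. Not the HBase implementation.
public final class ArchivePathSketch {
    // root, e.g. hdfs://localhost:40601/user/jenkins/test-data/<run-id>
    static Path archiveLocation(Path root, String ns, String table,
                                String encodedRegion, String family, String hfile) {
        // source:      <root>/data/<ns>/<table>/<region>/<family>/<hfile>
        // destination: <root>/archive/data/<ns>/<table>/<region>/<family>/<hfile>
        return new Path(root, String.format("archive/data/%s/%s/%s/%s/%s",
                ns, table, encodedRegion, family, hfile));
    }
}
```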
2024-12-06T10:18:22,561 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for d2fcaf53b78370d16571065501f9880b: 2024-12-06T10:18:22,562 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:22,563 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=d2fcaf53b78370d16571065501f9880b, regionState=CLOSED 2024-12-06T10:18:22,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-06T10:18:22,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure d2fcaf53b78370d16571065501f9880b, server=552d6a33fa09,33397,1733480204743 in 1.4950 sec 2024-12-06T10:18:22,566 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-06T10:18:22,566 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2fcaf53b78370d16571065501f9880b, UNASSIGN in 1.4980 sec 2024-12-06T10:18:22,567 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-06T10:18:22,567 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5000 sec 2024-12-06T10:18:22,568 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480302568"}]},"ts":"1733480302568"} 2024-12-06T10:18:22,569 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T10:18:22,571 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T10:18:22,572 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5110 sec 2024-12-06T10:18:23,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T10:18:23,166 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-06T10:18:23,167 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T10:18:23,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,168 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-06T10:18:23,169 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,170 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:23,172 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/recovered.edits] 2024-12-06T10:18:23,174 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/5e6a5622f6214d8c840411d3c0a48e16 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/5e6a5622f6214d8c840411d3c0a48e16 2024-12-06T10:18:23,174 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/eaad11c5f27a433aa468f5d5cd965bf8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/A/eaad11c5f27a433aa468f5d5cd965bf8 2024-12-06T10:18:23,176 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8354396e834648c59ac6a43c23bb905c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/8354396e834648c59ac6a43c23bb905c 2024-12-06T10:18:23,177 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/ba50db72a428495c994eec67efa60043 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/B/ba50db72a428495c994eec67efa60043 2024-12-06T10:18:23,179 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/596bb346247848dc9f3b32e324cf549d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/596bb346247848dc9f3b32e324cf549d 2024-12-06T10:18:23,179 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/9209c2e24eff4bc9808a1a9604f7ed3c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/C/9209c2e24eff4bc9808a1a9604f7ed3c 2024-12-06T10:18:23,184 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/recovered.edits/504.seqid to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b/recovered.edits/504.seqid 2024-12-06T10:18:23,184 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/d2fcaf53b78370d16571065501f9880b 2024-12-06T10:18:23,184 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T10:18:23,186 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,189 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T10:18:23,190 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T10:18:23,191 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,191 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T10:18:23,191 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733480303191"}]},"ts":"9223372036854775807"} 2024-12-06T10:18:23,193 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T10:18:23,193 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d2fcaf53b78370d16571065501f9880b, NAME => 'TestAcidGuarantees,,1733480270537.d2fcaf53b78370d16571065501f9880b.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T10:18:23,193 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-06T10:18:23,193 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733480303193"}]},"ts":"9223372036854775807"} 2024-12-06T10:18:23,194 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T10:18:23,197 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 30 msec 2024-12-06T10:18:23,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-06T10:18:23,270 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-12-06T10:18:23,280 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 241), OpenFileDescriptor=452 (was 463), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=401 (was 382) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7228 (was 6262) - AvailableMemoryMB LEAK? - 2024-12-06T10:18:23,290 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=401, ProcessCount=11, AvailableMemoryMB=7227 2024-12-06T10:18:23,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
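The DISABLE (pid=93) and DELETE (pid=97) procedures above are driven from the test client through HBaseAdmin. A minimal sketch of the equivalent client-side calls with the HBase 2.x Admin API; the connection configuration is a placeholder read from the classpath, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client calls behind the DISABLE/DELETE procedures logged above.
public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(tn)) {
                admin.disableTable(tn); // master runs DisableTableProcedure (pid=93 above)
                admin.deleteTable(tn);  // master runs DeleteTableProcedure (pid=97 above)
            }
        }
    }
}
```

disableTable blocks until the regions are closed, which is why the log shows the close/unassign procedures finishing before DeleteTableProcedure starts archiving the region directory.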
2024-12-06T10:18:23,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:18:23,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:23,294 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:18:23,294 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:23,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-12-06T10:18:23,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T10:18:23,295 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:18:23,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742229_1405 (size=963) 2024-12-06T10:18:23,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T10:18:23,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T10:18:23,704 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:18:23,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742230_1406 (size=53) 2024-12-06T10:18:23,712 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:18:23,713 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0a237205d558afc218e72c1705b7c48d, disabling compactions & flushes 2024-12-06T10:18:23,713 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:23,713 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:23,713 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. after waiting 0 ms 2024-12-06T10:18:23,713 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:23,713 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:23,713 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:23,714 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:18:23,715 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733480303714"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480303714"}]},"ts":"1733480303714"} 2024-12-06T10:18:23,716 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
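The create request logged at 10:18:23,292 carries a full table descriptor: three families A/B/C with VERSIONS => '1', plus the table attribute hbase.hregion.compacting.memstore.type = ADAPTIVE. A sketch of building that descriptor with the 2.x client API, setting only the attributes visible in the log and leaving everything else at defaults.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the descriptor behind CreateTableProcedure pid=98: three single-version
// families A/B/C and the ADAPTIVE in-memory compaction attribute seen in the log.
public final class CreateTableSketch {
    static void createAcidTable(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata from the log: compacting memstore type = ADAPTIVE
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String cf : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(cf))
                .setMaxVersions(1) // VERSIONS => '1'
                .build());
        }
        admin.createTable(tdb.build());
    }
}
```

The small MEMSTORE_FLUSHSIZE (131072) that triggers the TableDescriptorChecker warning is configured elsewhere by the test harness and is not reproduced in this sketch.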
2024-12-06T10:18:23,716 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:18:23,717 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480303716"}]},"ts":"1733480303716"} 2024-12-06T10:18:23,717 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T10:18:23,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, ASSIGN}] 2024-12-06T10:18:23,722 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, ASSIGN 2024-12-06T10:18:23,723 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:18:23,873 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:23,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:18:23,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T10:18:24,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:24,030 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:24,030 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:18:24,030 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,031 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:18:24,031 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,031 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,033 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,034 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:24,034 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a237205d558afc218e72c1705b7c48d columnFamilyName A 2024-12-06T10:18:24,034 DEBUG [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:24,035 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(327): Store=0a237205d558afc218e72c1705b7c48d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:24,035 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,036 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:24,036 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a237205d558afc218e72c1705b7c48d columnFamilyName B 2024-12-06T10:18:24,036 DEBUG [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:24,036 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(327): Store=0a237205d558afc218e72c1705b7c48d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:24,036 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,037 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:24,037 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a237205d558afc218e72c1705b7c48d columnFamilyName C 2024-12-06T10:18:24,038 DEBUG [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:24,038 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(327): Store=0a237205d558afc218e72c1705b7c48d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:24,038 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:24,039 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,039 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,041 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:18:24,041 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,043 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:18:24,044 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 0a237205d558afc218e72c1705b7c48d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72216754, jitterRate=0.0761134922504425}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:18:24,045 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:24,046 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., pid=100, masterSystemTime=1733480304026 2024-12-06T10:18:24,047 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:24,047 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:24,048 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:24,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-06T10:18:24,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 in 174 msec 2024-12-06T10:18:24,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-06T10:18:24,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, ASSIGN in 328 msec 2024-12-06T10:18:24,052 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:18:24,052 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480304052"}]},"ts":"1733480304052"} 2024-12-06T10:18:24,053 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T10:18:24,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:18:24,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 764 msec 2024-12-06T10:18:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T10:18:24,398 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-06T10:18:24,399 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-12-06T10:18:24,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:24,406 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:24,408 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:24,410 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:18:24,411 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47978, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:18:24,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T10:18:24,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:18:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:24,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742231_1407 (size=999) 2024-12-06T10:18:24,831 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-06T10:18:24,831 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-06T10:18:24,833 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:18:24,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, REOPEN/MOVE}] 2024-12-06T10:18:24,835 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, REOPEN/MOVE 2024-12-06T10:18:24,836 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:24,837 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:18:24,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:18:24,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:24,989 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,989 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:18:24,989 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 0a237205d558afc218e72c1705b7c48d, disabling compactions & flushes 2024-12-06T10:18:24,989 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:24,989 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:24,989 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. after waiting 0 ms 2024-12-06T10:18:24,989 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:24,993 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-06T10:18:24,994 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:24,994 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:24,994 WARN [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 0a237205d558afc218e72c1705b7c48d to self. 2024-12-06T10:18:24,995 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:24,996 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=CLOSED 2024-12-06T10:18:24,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-06T10:18:24,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 in 160 msec 2024-12-06T10:18:24,998 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, REOPEN/MOVE; state=CLOSED, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=true 2024-12-06T10:18:25,149 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:18:25,302 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,305 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:25,305 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:18:25,306 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,306 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:18:25,306 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,306 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,307 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,308 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:25,308 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a237205d558afc218e72c1705b7c48d columnFamilyName A 2024-12-06T10:18:25,309 DEBUG [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:25,309 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(327): Store=0a237205d558afc218e72c1705b7c48d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:25,310 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,310 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:25,310 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a237205d558afc218e72c1705b7c48d columnFamilyName B 2024-12-06T10:18:25,310 DEBUG [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:25,311 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(327): Store=0a237205d558afc218e72c1705b7c48d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:25,311 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,311 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:25,311 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a237205d558afc218e72c1705b7c48d columnFamilyName C 2024-12-06T10:18:25,311 DEBUG [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:25,312 INFO [StoreOpener-0a237205d558afc218e72c1705b7c48d-1 {}] regionserver.HStore(327): Store=0a237205d558afc218e72c1705b7c48d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:25,312 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,313 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,314 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,315 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:18:25,316 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,317 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 0a237205d558afc218e72c1705b7c48d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64728685, jitterRate=-0.03546743094921112}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:18:25,318 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:25,319 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., pid=105, masterSystemTime=1733480305302 2024-12-06T10:18:25,320 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,320 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
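The modify request at 10:18:24,413 (pid=101) changes only family A, setting IS_MOB => 'true' and MOB_THRESHOLD => '4'; the master then reopens the region (pid=102/103) so the new descriptor takes effect, which is the open logged just above. A sketch of the same alteration with the 2.x Admin API, assuming the table already exists as created earlier.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the alteration behind ModifyTableProcedure pid=101: turn family 'A'
// into a MOB family with a 4-byte threshold, leaving B and C untouched.
public final class EnableMobSketch {
    static void enableMobOnFamilyA(Admin admin) throws java.io.IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
                .build())
            .build();
        admin.modifyTable(modified);   // triggers the region reopen seen in pid=102/103 above
    }
}
```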
2024-12-06T10:18:25,320 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=OPEN, openSeqNum=5, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-12-06T10:18:25,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 in 171 msec 2024-12-06T10:18:25,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-06T10:18:25,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, REOPEN/MOVE in 487 msec 2024-12-06T10:18:25,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-06T10:18:25,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 491 msec 2024-12-06T10:18:25,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 913 msec 2024-12-06T10:18:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-06T10:18:25,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-12-06T10:18:25,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,337 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-12-06T10:18:25,340 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,340 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-12-06T10:18:25,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x3dd5b441 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-12-06T10:18:25,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,350 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-12-06T10:18:25,356 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,357 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-12-06T10:18:25,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,362 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-12-06T10:18:25,368 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,369 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-12-06T10:18:25,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-12-06T10:18:25,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,387 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-12-06T10:18:25,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:25,396 DEBUG [hconnection-0x156fd75c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,396 DEBUG [hconnection-0x5d0c8020-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,396 DEBUG [hconnection-0x31aef90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,397 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,397 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,399 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,400 DEBUG [hconnection-0x3373ae6e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,400 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50216, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,403 DEBUG [hconnection-0x479dbae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:25,404 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,404 DEBUG [hconnection-0x658249fa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-06T10:18:25,405 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,405 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
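[Editor's note] The preceding entries show a client asking the master to flush TestAcidGuarantees (pid=106, FlushTableProcedure, with a FlushRegionProcedure subprocedure pid=107), then polling "Checking to see if procedure is done". A minimal sketch of the client side of that request, assuming the standard HBase 2.x Admin API rather than the test harness itself:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush blocks the caller while the master runs a flush procedure
      // for the table and the client polls until it completes, which matches
      // the repeated "is procedure done pid=106" checks in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The RegionTooBusyException entries that follow are, as far as I can tell, the expected back-pressure path: writes arriving while the memstore is over its blocking limit are rejected, and the standard client treats this as a retriable exception and retries with backoff rather than failing the operation outright.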
2024-12-06T10:18:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:25,405 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:25,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:18:25,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:25,418 DEBUG [hconnection-0x58b8c9b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,419 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,422 DEBUG [hconnection-0x36ff7ab2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,423 DEBUG [hconnection-0x1ae0c78a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,424 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,424 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,435 DEBUG [hconnection-0x76bcd905-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:25,436 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:25,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480365437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480365436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480365439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480365439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480365440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206982c69c1f5a445abbe89bf883e4c1fbb_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480305409/Put/seqid=0 2024-12-06T10:18:25,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:25,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742232_1408 (size=12154) 2024-12-06T10:18:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480365540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480365542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480365542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480365542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480365542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:25,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:25,710 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:25,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:25,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480365743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480365746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480365754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480365754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:25,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480365754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,863 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:25,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:25,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:25,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:25,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:25,919 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:25,925 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206982c69c1f5a445abbe89bf883e4c1fbb_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206982c69c1f5a445abbe89bf883e4c1fbb_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:25,927 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/2619bbf0a5c94c97ba5cf22caccbb10f, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:25,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/2619bbf0a5c94c97ba5cf22caccbb10f is 175, key is test_row_0/A:col10/1733480305409/Put/seqid=0 2024-12-06T10:18:25,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742233_1409 (size=30955) 2024-12-06T10:18:25,945 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/2619bbf0a5c94c97ba5cf22caccbb10f 2024-12-06T10:18:25,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/c5db907d1a3848aead612ec9bbcaf145 is 50, key is test_row_0/B:col10/1733480305409/Put/seqid=0 2024-12-06T10:18:25,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742234_1410 (size=12001) 2024-12-06T10:18:25,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/c5db907d1a3848aead612ec9bbcaf145 2024-12-06T10:18:26,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:26,016 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480366045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480366054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480366059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480366059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480366060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/84eb67c638f048e4a929ba5250bbf237 is 50, key is test_row_0/C:col10/1733480305409/Put/seqid=0 2024-12-06T10:18:26,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742235_1411 (size=12001) 2024-12-06T10:18:26,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,322 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,476 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/84eb67c638f048e4a929ba5250bbf237 2024-12-06T10:18:26,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:26,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/2619bbf0a5c94c97ba5cf22caccbb10f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f 2024-12-06T10:18:26,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f, entries=150, sequenceid=15, filesize=30.2 K 2024-12-06T10:18:26,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/c5db907d1a3848aead612ec9bbcaf145 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/c5db907d1a3848aead612ec9bbcaf145 2024-12-06T10:18:26,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/c5db907d1a3848aead612ec9bbcaf145, entries=150, sequenceid=15, filesize=11.7 K 2024-12-06T10:18:26,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/84eb67c638f048e4a929ba5250bbf237 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237 2024-12-06T10:18:26,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237, entries=150, sequenceid=15, filesize=11.7 K 2024-12-06T10:18:26,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0a237205d558afc218e72c1705b7c48d in 1141ms, sequenceid=15, compaction requested=false 2024-12-06T10:18:26,551 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-06T10:18:26,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:26,558 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:26,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T10:18:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:26,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:26,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:26,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412061205270b40b24363b47d5715cb4cedd7_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480305433/Put/seqid=0 2024-12-06T10:18:26,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480366570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480366575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480366575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480366579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480366579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742236_1412 (size=14594) 2024-12-06T10:18:26,628 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:26,629 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:26,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,636 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412061205270b40b24363b47d5715cb4cedd7_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061205270b40b24363b47d5715cb4cedd7_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:26,638 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/99abc431051e4d3cbfb7c8e44b061395, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:26,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/99abc431051e4d3cbfb7c8e44b061395 is 175, key is test_row_0/A:col10/1733480305433/Put/seqid=0 2024-12-06T10:18:26,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480366681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742237_1413 (size=39549) 2024-12-06T10:18:26,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480366687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480366687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480366688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,693 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/99abc431051e4d3cbfb7c8e44b061395 2024-12-06T10:18:26,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480366688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/377a88d1ecb04e959e20ca1b471f7b68 is 50, key is test_row_0/B:col10/1733480305433/Put/seqid=0 2024-12-06T10:18:26,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742238_1414 (size=12001) 2024-12-06T10:18:26,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/377a88d1ecb04e959e20ca1b471f7b68 2024-12-06T10:18:26,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/0d327fbac0484ead8772e8367223768f is 50, key is test_row_0/C:col10/1733480305433/Put/seqid=0 2024-12-06T10:18:26,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742239_1415 (size=12001) 2024-12-06T10:18:26,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/0d327fbac0484ead8772e8367223768f 2024-12-06T10:18:26,782 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/99abc431051e4d3cbfb7c8e44b061395 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395 2024-12-06T10:18:26,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395, entries=200, sequenceid=41, filesize=38.6 K 2024-12-06T10:18:26,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/377a88d1ecb04e959e20ca1b471f7b68 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/377a88d1ecb04e959e20ca1b471f7b68 2024-12-06T10:18:26,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/377a88d1ecb04e959e20ca1b471f7b68, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T10:18:26,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/0d327fbac0484ead8772e8367223768f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/0d327fbac0484ead8772e8367223768f 2024-12-06T10:18:26,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/0d327fbac0484ead8772e8367223768f, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T10:18:26,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0a237205d558afc218e72c1705b7c48d in 256ms, sequenceid=41, compaction requested=false 2024-12-06T10:18:26,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:26,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:18:26,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:26,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:26,891 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:26,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:26,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:26,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:26,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206601512bcd1bd43a0936b25373abd099a_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480306889/Put/seqid=0 2024-12-06T10:18:26,936 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:26,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:26,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:26,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:26,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742240_1416 (size=14594) 2024-12-06T10:18:26,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:26,938 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:26,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480366935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,946 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206601512bcd1bd43a0936b25373abd099a_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206601512bcd1bd43a0936b25373abd099a_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:26,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480366941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480366942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,948 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b1b0d6f832694aae8518919b72babed6, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:26,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b1b0d6f832694aae8518919b72babed6 is 175, key is test_row_0/A:col10/1733480306889/Put/seqid=0 2024-12-06T10:18:26,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480366943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:26,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:26,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480366944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742241_1417 (size=39549) 2024-12-06T10:18:27,010 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b1b0d6f832694aae8518919b72babed6 2024-12-06T10:18:27,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d7ac5a2950354648bbf2f9b22aa7c25d is 50, key is test_row_0/B:col10/1733480306889/Put/seqid=0 2024-12-06T10:18:27,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742242_1418 (size=12001) 2024-12-06T10:18:27,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480367046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480367048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480367049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480367053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480367055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:27,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:27,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,242 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:27,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:27,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480367253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480367254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480367255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480367258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480367261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:27,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d7ac5a2950354648bbf2f9b22aa7c25d 2024-12-06T10:18:27,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/1c376718eccc46b4ab24410fcd5afd6f is 50, key is test_row_0/C:col10/1733480306889/Put/seqid=0 2024-12-06T10:18:27,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742243_1419 (size=12001) 2024-12-06T10:18:27,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/1c376718eccc46b4ab24410fcd5afd6f 2024-12-06T10:18:27,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b1b0d6f832694aae8518919b72babed6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6 2024-12-06T10:18:27,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6, entries=200, sequenceid=52, filesize=38.6 K 2024-12-06T10:18:27,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d7ac5a2950354648bbf2f9b22aa7c25d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d7ac5a2950354648bbf2f9b22aa7c25d 2024-12-06T10:18:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:27,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d7ac5a2950354648bbf2f9b22aa7c25d, entries=150, sequenceid=52, filesize=11.7 K 2024-12-06T10:18:27,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/1c376718eccc46b4ab24410fcd5afd6f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/1c376718eccc46b4ab24410fcd5afd6f 2024-12-06T10:18:27,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/1c376718eccc46b4ab24410fcd5afd6f, entries=150, sequenceid=52, filesize=11.7 K 2024-12-06T10:18:27,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0a237205d558afc218e72c1705b7c48d in 628ms, sequenceid=52, compaction requested=true 2024-12-06T10:18:27,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:27,520 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:27,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:27,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:27,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:27,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:27,522 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:27,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:27,522 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:27,522 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110053 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:27,522 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:27,522 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,522 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=107.5 K 2024-12-06T10:18:27,522 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,522 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6] 2024-12-06T10:18:27,523 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2619bbf0a5c94c97ba5cf22caccbb10f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733480305404 2024-12-06T10:18:27,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99abc431051e4d3cbfb7c8e44b061395, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733480305433 2024-12-06T10:18:27,528 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:27,528 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:27,528 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,528 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/c5db907d1a3848aead612ec9bbcaf145, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/377a88d1ecb04e959e20ca1b471f7b68, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d7ac5a2950354648bbf2f9b22aa7c25d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=35.2 K 2024-12-06T10:18:27,528 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1b0d6f832694aae8518919b72babed6, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480306567 2024-12-06T10:18:27,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c5db907d1a3848aead612ec9bbcaf145, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733480305404 2024-12-06T10:18:27,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 377a88d1ecb04e959e20ca1b471f7b68, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733480305433 2024-12-06T10:18:27,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 
d7ac5a2950354648bbf2f9b22aa7c25d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480306575 2024-12-06T10:18:27,545 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:27,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,549 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:27,562 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:27,563 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b1367b06ce7d4a2da40643788a9131aa is 50, key is test_row_0/B:col10/1733480306889/Put/seqid=0 2024-12-06T10:18:27,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:27,573 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120631ba72f6f06241ec9e5a2ccb95813116_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:27,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:27,576 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120631ba72f6f06241ec9e5a2ccb95813116_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:27,576 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120631ba72f6f06241ec9e5a2ccb95813116_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:27,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480367584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480367584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480367590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480367591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062738e021ee804f11bc33f785ad2fc104_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480306940/Put/seqid=0 2024-12-06T10:18:27,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480367595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742245_1421 (size=4469) 2024-12-06T10:18:27,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742244_1420 (size=12104) 2024-12-06T10:18:27,646 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b1367b06ce7d4a2da40643788a9131aa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1367b06ce7d4a2da40643788a9131aa 2024-12-06T10:18:27,655 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into b1367b06ce7d4a2da40643788a9131aa(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:27,655 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:27,655 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480307521; duration=0sec 2024-12-06T10:18:27,655 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:27,655 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:27,655 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:27,656 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:27,657 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:27,657 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:27,657 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/0d327fbac0484ead8772e8367223768f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/1c376718eccc46b4ab24410fcd5afd6f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=35.2 K 2024-12-06T10:18:27,657 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 84eb67c638f048e4a929ba5250bbf237, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733480305404 2024-12-06T10:18:27,658 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d327fbac0484ead8772e8367223768f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733480305433 2024-12-06T10:18:27,658 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c376718eccc46b4ab24410fcd5afd6f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480306575 2024-12-06T10:18:27,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is 
added to blk_1073742246_1422 (size=12154) 2024-12-06T10:18:27,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,668 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062738e021ee804f11bc33f785ad2fc104_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062738e021ee804f11bc33f785ad2fc104_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/7174e8c57b8e4ec1a046fb66a6ae9ca8, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:27,670 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/7174e8c57b8e4ec1a046fb66a6ae9ca8 is 175, key is test_row_0/A:col10/1733480306940/Put/seqid=0 2024-12-06T10:18:27,671 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/95d48a7174444bf8a5d6dd0aa4947f13 is 50, key is test_row_0/C:col10/1733480306889/Put/seqid=0 2024-12-06T10:18:27,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742247_1423 (size=30955) 2024-12-06T10:18:27,683 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/7174e8c57b8e4ec1a046fb66a6ae9ca8 2024-12-06T10:18:27,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742248_1424 (size=12104) 2024-12-06T10:18:27,698 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/95d48a7174444bf8a5d6dd0aa4947f13 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/95d48a7174444bf8a5d6dd0aa4947f13 2024-12-06T10:18:27,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480367697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480367697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,703 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 95d48a7174444bf8a5d6dd0aa4947f13(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:27,703 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:27,703 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480307522; duration=0sec 2024-12-06T10:18:27,703 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:27,703 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:27,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/7dabef92fc574c5b8188a085f9f6b74e is 50, key is test_row_0/B:col10/1733480306940/Put/seqid=0 2024-12-06T10:18:27,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480367701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480367701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480367707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:27,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742249_1425 (size=12001) 2024-12-06T10:18:27,717 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/7dabef92fc574c5b8188a085f9f6b74e 2024-12-06T10:18:27,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/166e997521724a6880f16c14f5d09542 is 50, key is test_row_0/C:col10/1733480306940/Put/seqid=0 2024-12-06T10:18:27,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742250_1426 (size=12001) 2024-12-06T10:18:27,754 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/166e997521724a6880f16c14f5d09542 2024-12-06T10:18:27,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/7174e8c57b8e4ec1a046fb66a6ae9ca8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8 2024-12-06T10:18:27,768 
INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8, entries=150, sequenceid=77, filesize=30.2 K
2024-12-06T10:18:27,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/7dabef92fc574c5b8188a085f9f6b74e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/7dabef92fc574c5b8188a085f9f6b74e
2024-12-06T10:18:27,776 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/7dabef92fc574c5b8188a085f9f6b74e, entries=150, sequenceid=77, filesize=11.7 K
2024-12-06T10:18:27,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/166e997521724a6880f16c14f5d09542 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/166e997521724a6880f16c14f5d09542
2024-12-06T10:18:27,784 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/166e997521724a6880f16c14f5d09542, entries=150, sequenceid=77, filesize=11.7 K
2024-12-06T10:18:27,785 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0a237205d558afc218e72c1705b7c48d in 236ms, sequenceid=77, compaction requested=false
2024-12-06T10:18:27,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d:
2024-12-06T10:18:27,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.
2024-12-06T10:18:27,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107
2024-12-06T10:18:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=107
2024-12-06T10:18:27,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106
2024-12-06T10:18:27,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3820 sec
2024-12-06T10:18:27,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.3860 sec
2024-12-06T10:18:27,855 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,858 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,862 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,865 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,869 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,874 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,885 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,891 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,906 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,913 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,920 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,926 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,933 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,936 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,941 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,948 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,951 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,956 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,962 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,966 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,972 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d
2024-12-06T10:18:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-06T10:18:27,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A
2024-12-06T10:18:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:18:27,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B
2024-12-06T10:18:27,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:18:27,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C
2024-12-06T10:18:27,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:18:27,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:18:27,977 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ae3d0b12e18f4fc2a03284df4f078090_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:27,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,019 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#362 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:18:28,020 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/aa0608f1c1c840f8ba374febde507b28 is 175, key is test_row_0/A:col10/1733480306889/Put/seqid=0
2024-12-06T10:18:28,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742251_1427 (size=12154)
2024-12-06T10:18:28,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742252_1428 (size=31058)
2024-12-06T10:18:28,074 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/aa0608f1c1c840f8ba374febde507b28 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aa0608f1c1c840f8ba374febde507b28
2024-12-06T10:18:28,087 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into aa0608f1c1c840f8ba374febde507b28(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-06T10:18:28,087 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d:
2024-12-06T10:18:28,087 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480307519; duration=0sec
2024-12-06T10:18:28,088 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:18:28,088 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A
2024-12-06T10:18:28,110 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-06T10:18:28,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480368113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480368113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480368116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480368123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480368123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480368226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480368229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480368233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480368236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:18:28,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480368237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:18:28,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480368437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480368444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,450 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:28,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480368444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,455 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ae3d0b12e18f4fc2a03284df4f078090_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ae3d0b12e18f4fc2a03284df4f078090_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:28,456 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/4cad0455c29a410aa7a4c724608055f7, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:28,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/4cad0455c29a410aa7a4c724608055f7 is 175, key is test_row_0/A:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:28,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480368447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480368448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742253_1429 (size=30955) 2024-12-06T10:18:28,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480368747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480368749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480368757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480368760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:28,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480368761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:28,906 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/4cad0455c29a410aa7a4c724608055f7 2024-12-06T10:18:28,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/dd24dd9043ea4892bf2afe99be1335d8 is 50, key is test_row_0/B:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:28,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742254_1430 (size=12001) 2024-12-06T10:18:29,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:29,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480369254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:29,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:29,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480369258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:29,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480369262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:29,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480369263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:29,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480369268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:29,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/dd24dd9043ea4892bf2afe99be1335d8 2024-12-06T10:18:29,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/41f75f32711f4022a5ddc2a99844b57d is 50, key is test_row_0/C:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:29,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742255_1431 (size=12001) 2024-12-06T10:18:29,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/41f75f32711f4022a5ddc2a99844b57d 2024-12-06T10:18:29,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/4cad0455c29a410aa7a4c724608055f7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7 2024-12-06T10:18:29,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7, entries=150, sequenceid=91, filesize=30.2 K 2024-12-06T10:18:29,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/dd24dd9043ea4892bf2afe99be1335d8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/dd24dd9043ea4892bf2afe99be1335d8 2024-12-06T10:18:29,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/dd24dd9043ea4892bf2afe99be1335d8, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:18:29,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/41f75f32711f4022a5ddc2a99844b57d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/41f75f32711f4022a5ddc2a99844b57d 2024-12-06T10:18:29,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/41f75f32711f4022a5ddc2a99844b57d, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:18:29,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0a237205d558afc218e72c1705b7c48d in 1430ms, sequenceid=91, compaction requested=true 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:29,402 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:29,402 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:29,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:29,403 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:29,403 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:29,403 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:29,403 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:29,403 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:29,403 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:29,403 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aa0608f1c1c840f8ba374febde507b28, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=90.8 K 2024-12-06T10:18:29,403 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1367b06ce7d4a2da40643788a9131aa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/7dabef92fc574c5b8188a085f9f6b74e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/dd24dd9043ea4892bf2afe99be1335d8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=35.3 K 2024-12-06T10:18:29,403 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:29,403 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aa0608f1c1c840f8ba374febde507b28, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7] 2024-12-06T10:18:29,403 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b1367b06ce7d4a2da40643788a9131aa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480306575 2024-12-06T10:18:29,406 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa0608f1c1c840f8ba374febde507b28, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480306575 2024-12-06T10:18:29,406 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dabef92fc574c5b8188a085f9f6b74e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733480306940 2024-12-06T10:18:29,406 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7174e8c57b8e4ec1a046fb66a6ae9ca8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733480306940 2024-12-06T10:18:29,407 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting dd24dd9043ea4892bf2afe99be1335d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480307589 2024-12-06T10:18:29,407 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cad0455c29a410aa7a4c724608055f7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480307589 2024-12-06T10:18:29,432 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:29,434 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#372 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:29,434 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/ccafbf84414d4ea19aef69278fbfe7e7 is 50, key is test_row_0/B:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:29,437 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120689e43d957b134a6cb2cd361065c5d8d9_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:29,439 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120689e43d957b134a6cb2cd361065c5d8d9_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:29,440 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120689e43d957b134a6cb2cd361065c5d8d9_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:29,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742256_1432 (size=12207) 2024-12-06T10:18:29,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742257_1433 (size=4469) 2024-12-06T10:18:29,445 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#371 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:29,446 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/42b096ea0afb4e8b8a7ccddc336f389e is 175, key is test_row_0/A:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:29,447 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/ccafbf84414d4ea19aef69278fbfe7e7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/ccafbf84414d4ea19aef69278fbfe7e7 2024-12-06T10:18:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742258_1434 (size=31161) 2024-12-06T10:18:29,451 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into ccafbf84414d4ea19aef69278fbfe7e7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:29,451 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:29,452 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480309402; duration=0sec 2024-12-06T10:18:29,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:29,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:29,452 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:29,455 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:29,455 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:29,455 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:29,455 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/95d48a7174444bf8a5d6dd0aa4947f13, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/166e997521724a6880f16c14f5d09542, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/41f75f32711f4022a5ddc2a99844b57d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=35.3 K 2024-12-06T10:18:29,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 95d48a7174444bf8a5d6dd0aa4947f13, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480306575 2024-12-06T10:18:29,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 166e997521724a6880f16c14f5d09542, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733480306940 2024-12-06T10:18:29,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 41f75f32711f4022a5ddc2a99844b57d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480307589 2024-12-06T10:18:29,458 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/42b096ea0afb4e8b8a7ccddc336f389e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/42b096ea0afb4e8b8a7ccddc336f389e 2024-12-06T10:18:29,462 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into 42b096ea0afb4e8b8a7ccddc336f389e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:29,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:29,462 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480309402; duration=0sec 2024-12-06T10:18:29,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:29,463 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:29,465 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#373 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:29,466 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/8f76fbc799bd4b7dafe92cc292c45d53 is 50, key is test_row_0/C:col10/1733480307589/Put/seqid=0 2024-12-06T10:18:29,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742259_1435 (size=12207) 2024-12-06T10:18:29,476 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/8f76fbc799bd4b7dafe92cc292c45d53 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/8f76fbc799bd4b7dafe92cc292c45d53 2024-12-06T10:18:29,481 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 8f76fbc799bd4b7dafe92cc292c45d53(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:29,481 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:29,481 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480309402; duration=0sec 2024-12-06T10:18:29,481 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:29,481 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T10:18:29,510 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-06T10:18:29,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-06T10:18:29,513 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:29,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T10:18:29,514 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:29,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T10:18:29,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:29,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T10:18:29,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
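The entries above trace a client-requested flush end to end: the master stores FlushTableProcedure pid=108, spawns FlushRegionProcedure pid=109 as a subprocedure, and dispatches a FlushRegionCallable to the region server, while the caller polls "Checking to see if procedure is done". A minimal client-side sketch of issuing such a flush follows; the connection setup is assumed, the table name comes from the log, and, as the "Operation: FLUSH ... completed" lines show, the call returns once the procedure finishes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; the client waits on the
                // resulting flush procedure, matching the procId polling seen in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }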
2024-12-06T10:18:29,666 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:18:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:29,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d4fba879792b47c49fd691a0dd560dc1_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:29,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742260_1436 (size=12154) 2024-12-06T10:18:29,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:29,703 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d4fba879792b47c49fd691a0dd560dc1_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d4fba879792b47c49fd691a0dd560dc1_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:29,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/af998095b4434beeb9bfd8c386e5b100, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:29,705 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/af998095b4434beeb9bfd8c386e5b100 is 175, key is test_row_0/A:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:29,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742261_1437 (size=30955) 2024-12-06T10:18:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T10:18:30,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T10:18:30,120 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/af998095b4434beeb9bfd8c386e5b100 2024-12-06T10:18:30,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/539d0d0a92ad472297a8a01d6709057f is 50, key is test_row_0/B:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:30,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742262_1438 (size=12001) 2024-12-06T10:18:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:30,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:30,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480370281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480370282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480370282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480370283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480370283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480370384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480370387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480370388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480370391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480370391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,542 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/539d0d0a92ad472297a8a01d6709057f 2024-12-06T10:18:30,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/44bdc2371c8d4644a0dc21bbb000d5e0 is 50, key is test_row_0/C:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742263_1439 (size=12001) 2024-12-06T10:18:30,578 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/44bdc2371c8d4644a0dc21bbb000d5e0 2024-12-06T10:18:30,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/af998095b4434beeb9bfd8c386e5b100 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100 2024-12-06T10:18:30,590 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100, entries=150, sequenceid=119, filesize=30.2 K 2024-12-06T10:18:30,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/539d0d0a92ad472297a8a01d6709057f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/539d0d0a92ad472297a8a01d6709057f 2024-12-06T10:18:30,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480370588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480370591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480370594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,596 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/539d0d0a92ad472297a8a01d6709057f, entries=150, sequenceid=119, filesize=11.7 K 2024-12-06T10:18:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/44bdc2371c8d4644a0dc21bbb000d5e0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/44bdc2371c8d4644a0dc21bbb000d5e0 2024-12-06T10:18:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,602 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/44bdc2371c8d4644a0dc21bbb000d5e0, 
entries=150, sequenceid=119, filesize=11.7 K 2024-12-06T10:18:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,603 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0a237205d558afc218e72c1705b7c48d in 937ms, sequenceid=119, compaction requested=false 2024-12-06T10:18:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:30,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
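The burst of RegionTooBusyException warnings earlier in this stretch comes from HRegion.checkResources(): while the ~140.89 KB flush above was still running, incoming writes pushed the region past its 512.0 K blocking memstore limit, so the mutations were rejected until the flush drained the memstore. In stock HBase that limit is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, and the client normally retries the exception on its own according to hbase.client.retries.number and hbase.client.pause. The sketch below is only an illustrative manual write loop under those assumptions; the row, family, and qualifier mirror the test_row_0/A:col10 cells seen in the log, while the value, retry budget, and backoff are invented for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempts = 5;       // illustrative retry budget
                long backoffMs = 100L;  // illustrative starting backoff
                for (int i = 0; i < attempts; i++) {
                    try {
                        table.put(put); // rejected server-side with RegionTooBusyException while the memstore is blocked
                        break;
                    } catch (RegionTooBusyException busy) {
                        // In practice the HBase client usually retries this internally and may rethrow it
                        // wrapped once retries are exhausted; this explicit catch is purely illustrative.
                        if (i == attempts - 1) throw busy;
                        Thread.sleep(backoffMs);  // back off and give the in-flight flush time to free memstore space
                        backoffMs *= 2;
                    }
                }
            }
        }
    }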
2024-12-06T10:18:30,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-06T10:18:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-06T10:18:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-06T10:18:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0910 sec 2024-12-06T10:18:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.0960 sec 2024-12-06T10:18:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] 
2024-12-06T10:18:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-12-06T10:18:30,617 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed
2024-12-06T10:18:30,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-06T10:18:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees
2024-12-06T10:18:30,620 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-06T10:18:30,621 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-06T10:18:30,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-06T10:18:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-06T10:18:30,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d
2024-12-06T10:18:30,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-06T10:18:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A
2024-12-06T10:18:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:18:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B
2024-12-06T10:18:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:18:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C
2024-12-06T10:18:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:18:30,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120669b1d403ebbc4b9a92cdaa02f75ce652_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_1/A:col10/1733480310281/Put/seqid=0
2024-12-06T10:18:30,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742264_1440 (size=12304)
2024-12-06T10:18:30,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T10:18:30,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480370760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480370763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,773 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:30,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
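[Illustrative note, not part of the captured log] The RegionTooBusyException warnings above are the server-side blocking path: HRegion.checkResources rejects a Mutate once the region's memstore passes its blocking limit (512.0 K in this run), and callers are expected to back off and retry. The stock HBase client normally retries this condition internally; the sketch below is only an application-level version of that backoff, assuming the standard HBase 2.x Table API and retry settings low enough for the exception to surface to the caller. putWithBackoff is a hypothetical helper, not part of the test code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      // Hypothetical helper: retry a Put a few times when the region reports
      // memstore pressure (the "Over memstore limit" condition in this log),
      // sleeping with capped exponential backoff between attempts.
      static void putWithBackoff(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e;
            }
            Thread.sleep(sleepMs);
            sleepMs = Math.min(sleepMs * 2, 5000);
          }
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row/family/qualifier mirror the keys visible in this log.
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithBackoff(table, put, 5);
        }
      }
    }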
2024-12-06T10:18:30,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:30,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:30,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:30,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:30,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:30,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480370866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480370873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480370895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480370897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:30,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480370898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T10:18:30,928 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:30,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:30,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:30,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:30,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:30,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:30,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:30,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,075 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:31,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480371073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,080 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120669b1d403ebbc4b9a92cdaa02f75ce652_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120669b1d403ebbc4b9a92cdaa02f75ce652_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:31,081 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/30db58433fe7434aaa311b5dbb8e2f8b, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:31,081 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/30db58433fe7434aaa311b5dbb8e2f8b is 175, key is test_row_1/A:col10/1733480310281/Put/seqid=0 2024-12-06T10:18:31,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:31,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:31,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:31,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480371081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742265_1441 (size=31101) 2024-12-06T10:18:31,089 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/30db58433fe7434aaa311b5dbb8e2f8b 2024-12-06T10:18:31,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/21950afa029f4d42822d3c974b418ac6 is 50, key is test_row_1/B:col10/1733480310281/Put/seqid=0 2024-12-06T10:18:31,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742266_1442 (size=9757) 2024-12-06T10:18:31,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T10:18:31,234 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:31,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:31,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
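[Illustrative note, not part of the captured log] The 512.0 K figure in these warnings is the region's blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test presumably configures a much smaller flush size than the 128 MB production default, so the limit is hit almost immediately under the concurrent writers. A minimal sketch of that arithmetic, using the standard configuration keys (the fallback values below are the usual defaults, not this test's settings):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BlockingMemstoreLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold (128 MB by default).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Multiplier past which writes are rejected with RegionTooBusyException.
        long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * blockMultiplier;
        System.out.println("blocking memstore size = " + blockingLimit + " bytes");
      }
    }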
2024-12-06T10:18:31,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480371379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,388 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
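Note: the repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold, which is the configured flush size times the block multiplier. The excerpt does not show the test's actual settings; as a hedged illustration only, the logged "Over memstore limit=512.0 K" would match, for example, a 128 K flush size with the default multiplier of 4:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        // Hypothetical values chosen only to reproduce the 512.0 K figure in the log;
        // the real test configuration may differ.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K (assumption)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // HBase default

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;

        // Writes to the region are refused with RegionTooBusyException above this size.
        System.out.println("Blocking memstore limit = " + (blockingLimit / 1024.0) + " K");
    }
}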
2024-12-06T10:18:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480371389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480371400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480371404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480371407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,542 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/21950afa029f4d42822d3c974b418ac6 2024-12-06T10:18:31,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/b65e0b4e05ae48a3b6d685e7b3bbeed2 is 50, key is test_row_1/C:col10/1733480310281/Put/seqid=0 2024-12-06T10:18:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742267_1443 (size=9757) 2024-12-06T10:18:31,694 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:31,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:31,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
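Note: RegionTooBusyException is a retriable condition; the stock HBase client already backs off and retries it internally, and the writes in the log eventually succeed once the flush drains the memstore. The sketch below only makes that retry-with-backoff pattern explicit against the table and column family names seen in this log (row key, value, and retry counts are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                        // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);      // region over its memstore limit; wait for the flush
                    backoffMs *= 2;
                }
            }
        }
    }
}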
2024-12-06T10:18:31,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T10:18:31,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:31,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480371886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:31,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480371894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:31,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/b65e0b4e05ae48a3b6d685e7b3bbeed2 2024-12-06T10:18:31,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/30db58433fe7434aaa311b5dbb8e2f8b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b 2024-12-06T10:18:31,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b, entries=150, sequenceid=132, filesize=30.4 K 2024-12-06T10:18:31,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/21950afa029f4d42822d3c974b418ac6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/21950afa029f4d42822d3c974b418ac6 2024-12-06T10:18:31,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/21950afa029f4d42822d3c974b418ac6, entries=100, sequenceid=132, filesize=9.5 K 2024-12-06T10:18:31,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/b65e0b4e05ae48a3b6d685e7b3bbeed2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b65e0b4e05ae48a3b6d685e7b3bbeed2 2024-12-06T10:18:31,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b65e0b4e05ae48a3b6d685e7b3bbeed2, entries=100, sequenceid=132, filesize=9.5 K 2024-12-06T10:18:31,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0a237205d558afc218e72c1705b7c48d in 1350ms, sequenceid=132, compaction requested=true 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:31,985 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:31,985 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:31,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:31,986 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:31,986 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:31,986 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:31,986 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
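Note: at this point the flush has finished and minor compactions are being requested for stores A, B and C. The same flush and compaction can also be requested administratively; a minimal sketch using the public Admin API (the server normally schedules both on its own, as the log shows):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompact {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);     // force memstores out to HFiles, like the flush above
            admin.compact(table);   // request a minor compaction of the flushed files
        }
    }
}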
2024-12-06T10:18:31,986 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,986 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/42b096ea0afb4e8b8a7ccddc336f389e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=91.0 K 2024-12-06T10:18:31,986 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/ccafbf84414d4ea19aef69278fbfe7e7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/539d0d0a92ad472297a8a01d6709057f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/21950afa029f4d42822d3c974b418ac6] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=33.2 K 2024-12-06T10:18:31,986 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:31,986 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/42b096ea0afb4e8b8a7ccddc336f389e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b] 2024-12-06T10:18:31,987 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ccafbf84414d4ea19aef69278fbfe7e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480307589 2024-12-06T10:18:31,987 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42b096ea0afb4e8b8a7ccddc336f389e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480307589 2024-12-06T10:18:31,987 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 539d0d0a92ad472297a8a01d6709057f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733480308096 2024-12-06T10:18:31,987 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting af998095b4434beeb9bfd8c386e5b100, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733480308096 2024-12-06T10:18:31,989 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 21950afa029f4d42822d3c974b418ac6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480310281 2024-12-06T10:18:31,989 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30db58433fe7434aaa311b5dbb8e2f8b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480310281 2024-12-06T10:18:32,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,000 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#380 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:32,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T10:18:32,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:32,001 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:18:32,001 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/62e2b8ed72d0480995e27bc5ac4c92f0 is 50, key is test_row_0/B:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:32,012 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:32,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742268_1444 (size=12409) 2024-12-06T10:18:32,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060aa0c4d97df04194aa0f08788622060c_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480310761/Put/seqid=0 2024-12-06T10:18:32,028 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/62e2b8ed72d0480995e27bc5ac4c92f0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/62e2b8ed72d0480995e27bc5ac4c92f0 2024-12-06T10:18:32,031 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d 
into 62e2b8ed72d0480995e27bc5ac4c92f0(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:32,031 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:32,031 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480311985; duration=0sec 2024-12-06T10:18:32,031 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:32,031 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:32,031 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:32,032 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:32,032 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:32,032 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
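Note: the "Exploring compaction algorithm has selected 3 files" lines reflect a size-ratio test over candidate file sets: every file in the chosen set must be no larger than a configured ratio times the combined size of the others. The sketch below is a simplified stand-in for that check, not the actual HBase implementation; the sizes are rough byte counts for the three B-store files in the log (11.9 K, 11.7 K, 9.5 K) and 1.2 is the usual default ratio:

import java.util.List;

public class RatioCheck {
    // Simplified size-ratio test: each file <= ratio * (sum of the other files).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three B-store files selected above.
        System.out.println(filesInRatio(List.of(12186L, 11982L, 9757L), 1.2)); // true
    }
}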
2024-12-06T10:18:32,032 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/8f76fbc799bd4b7dafe92cc292c45d53, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/44bdc2371c8d4644a0dc21bbb000d5e0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b65e0b4e05ae48a3b6d685e7b3bbeed2] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=33.2 K 2024-12-06T10:18:32,033 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f76fbc799bd4b7dafe92cc292c45d53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480307589 2024-12-06T10:18:32,033 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 44bdc2371c8d4644a0dc21bbb000d5e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733480308096 2024-12-06T10:18:32,033 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b65e0b4e05ae48a3b6d685e7b3bbeed2, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480310281 2024-12-06T10:18:32,036 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206bcad46734bcb45d8ae00c57b8a515b3f_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:32,037 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206bcad46734bcb45d8ae00c57b8a515b3f_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:32,038 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206bcad46734bcb45d8ae00c57b8a515b3f_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:32,077 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#383 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:32,078 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/794bdf2a94a448b494b33aecabab93d8 is 50, key is test_row_0/C:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:32,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742270_1446 (size=4469) 2024-12-06T10:18:32,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742269_1445 (size=12304) 2024-12-06T10:18:32,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:32,084 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#381 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:32,084 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b6166a50349748668c947c2dc8331b00 is 175, key is test_row_0/A:col10/1733480308106/Put/seqid=0 2024-12-06T10:18:32,090 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060aa0c4d97df04194aa0f08788622060c_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060aa0c4d97df04194aa0f08788622060c_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:32,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/9ce6669ea95141aea87c775e38510cfd, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:32,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/9ce6669ea95141aea87c775e38510cfd is 175, key is test_row_0/A:col10/1733480310761/Put/seqid=0 2024-12-06T10:18:32,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742271_1447 (size=12409) 2024-12-06T10:18:32,117 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/794bdf2a94a448b494b33aecabab93d8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/794bdf2a94a448b494b33aecabab93d8 2024-12-06T10:18:32,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742272_1448 (size=31470) 2024-12-06T10:18:32,123 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 794bdf2a94a448b494b33aecabab93d8(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:32,123 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:32,123 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480311985; duration=0sec 2024-12-06T10:18:32,123 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:32,123 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:32,124 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b6166a50349748668c947c2dc8331b00 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6166a50349748668c947c2dc8331b00 2024-12-06T10:18:32,130 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into b6166a50349748668c947c2dc8331b00(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
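Editor's note: the "Committing ... as ..." lines above are the last step of a compaction or flush: the newly written HFile is moved out of the region's .tmp directory into the column-family directory. A hedged sketch of that move with the plain Hadoop FileSystem API follows; HRegionFileSystem adds validation and StoreFileTracker bookkeeping on top of this, and the paths are copied from the log only for illustration.

    // Hedged sketch of the commit step: an atomic same-filesystem rename from the
    // region's .tmp directory into the store directory. Illustrative only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitCompactedFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:40601");   // NameNode seen in the log

            Path tmp = new Path("/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/"
                + "data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/794bdf2a94a448b494b33aecabab93d8");
            Path dst = new Path("/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/"
                + "data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/794bdf2a94a448b494b33aecabab93d8");

            FileSystem fs = FileSystem.get(conf);
            boolean committed = fs.rename(tmp, dst);   // move the compacted file into place
            System.out.println("committed=" + committed);
        }
    }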
2024-12-06T10:18:32,130 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:32,130 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480311985; duration=0sec 2024-12-06T10:18:32,130 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:32,130 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:32,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742273_1449 (size=31105) 2024-12-06T10:18:32,139 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/9ce6669ea95141aea87c775e38510cfd 2024-12-06T10:18:32,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d161d12d2d964ee39be16db7c4802375 is 50, key is test_row_0/B:col10/1733480310761/Put/seqid=0 2024-12-06T10:18:32,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742274_1450 (size=12151) 2024-12-06T10:18:32,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:32,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:32,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480372431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480372432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480372436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480372538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480372539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480372541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,556 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d161d12d2d964ee39be16db7c4802375 2024-12-06T10:18:32,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/560d62365081467cbeb5c351af19e9ff is 50, key is test_row_0/C:col10/1733480310761/Put/seqid=0 2024-12-06T10:18:32,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742275_1451 (size=12151) 2024-12-06T10:18:32,594 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/560d62365081467cbeb5c351af19e9ff 2024-12-06T10:18:32,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/9ce6669ea95141aea87c775e38510cfd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd 2024-12-06T10:18:32,607 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd, entries=150, sequenceid=155, filesize=30.4 K 2024-12-06T10:18:32,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d161d12d2d964ee39be16db7c4802375 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d161d12d2d964ee39be16db7c4802375 2024-12-06T10:18:32,614 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d161d12d2d964ee39be16db7c4802375, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T10:18:32,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/560d62365081467cbeb5c351af19e9ff as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/560d62365081467cbeb5c351af19e9ff 2024-12-06T10:18:32,622 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/560d62365081467cbeb5c351af19e9ff, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T10:18:32,622 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0a237205d558afc218e72c1705b7c48d in 622ms, sequenceid=155, compaction requested=false 2024-12-06T10:18:32,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:32,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
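Editor's note: the RegionTooBusyException warnings interleaved above come from HRegion.checkResources rejecting writes while the region's memstore is over its 512 K blocking limit; the flush that just finished brings it back under the limit. The standard HBase client retries these failures internally (governed by hbase.client.retries.number and hbase.client.pause), and depending on the client version the exception may surface wrapped in a RetriesExhaustedException. A hedged sketch of an explicit retry loop around a single put, with table/row/column names mirroring the test, might look like the following.

    // Hedged sketch: retrying a put rejected with RegionTooBusyException while the
    // region is over its memstore blocking limit. The normal client already retries
    // internally; this only makes the behaviour explicit.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnTooBusySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;                        // write accepted
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 10) {
                            throw e;                  // give up after a bounded number of retries
                        }
                        Thread.sleep(100L * attempt); // back off while the flush catches up
                    }
                }
            }
        }
    }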
2024-12-06T10:18:32,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-06T10:18:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-06T10:18:32,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-06T10:18:32,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0030 sec 2024-12-06T10:18:32,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.0080 sec 2024-12-06T10:18:32,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T10:18:32,727 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-06T10:18:32,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:32,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-06T10:18:32,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T10:18:32,730 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:32,731 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:32,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:32,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:32,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T10:18:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-06T10:18:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:32,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ada6979867264173a01ef1e500dc658e_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742276_1452 (size=14794) 2024-12-06T10:18:32,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480372792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480372792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480372792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T10:18:32,883 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-06T10:18:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:32,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:32,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
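Editor's note: the pid=112 FlushTableProcedure above was started by the client-side request ("Client=jenkins ... flush TestAcidGuarantees"). The region server rejects the per-region FlushRegionCallable with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing the previous snapshot, and the master re-dispatches the subprocedure until it succeeds. On the client side such a flush is a single Admin call; a hedged sketch follows.

    // Hedged sketch of the client call behind the "flush TestAcidGuarantees" request
    // logged above (HBaseAdmin$TableFuture "Operation: FLUSH"). Admin.flush waits for
    // the master-side FlushTableProcedure to finish.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Triggers a FlushTableProcedure on the master, which fans out
                // FlushRegionProcedure subprocedures to the region servers.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }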
2024-12-06T10:18:32,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:32,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:32,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480372896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480372902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480372902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480372902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:32,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:32,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480372902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T10:18:33,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-06T10:18:33,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:33,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
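Editor's note: the "Over memstore limit=512.0 K" rejections repeated above correspond to HRegion.checkResources blocking writes once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small 512 K figure suggests the test runs with a deliberately tiny flush size so the limit is hit quickly. A hedged sketch of a configuration that would yield that limit follows; the specific values are assumptions, not taken from the test source.

    // Hedged sketch: one configuration that produces the 512 K blocking limit seen in
    // the RegionTooBusyException messages (flush size * block multiplier). The exact
    // values used by TestAcidGuarantees are assumptions here.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // 128 K flush trigger (assumed)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x = 512 K
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("blocking limit bytes = " + blockingLimit);   // 524288 = 512 K
        }
    }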
2024-12-06T10:18:33,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:33,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:33,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480373109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480373110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480373112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,167 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:33,173 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ada6979867264173a01ef1e500dc658e_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ada6979867264173a01ef1e500dc658e_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:33,176 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/0edf375441ff42828c53d5db6594c339, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:33,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/0edf375441ff42828c53d5db6594c339 is 175, key is test_row_0/A:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:33,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-06T10:18:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742277_1453 (size=39749) 2024-12-06T10:18:33,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:33,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:33,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:33,191 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/0edf375441ff42828c53d5db6594c339 2024-12-06T10:18:33,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
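The same pid=113 failure is reported to the master repeatedly because the master keeps re-dispatching the flush-region procedure until the region server can actually run it, while a client polls the parent procedure, which is what the recurring "Checking to see if procedure is done pid=112" lines correspond to. A flush requested through the Admin API behaves this way in builds where table flush runs as a master procedure; the snippet below is a hypothetical client-side sketch of such a request, not code taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every column family of the table. Where this runs as a master
      // procedure, the client polls the master for completion, matching the repeated
      // "Checking to see if procedure is done pid=..." lines above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}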
2024-12-06T10:18:33,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/3bf35279fc7e4d05af0a7c343eb2d242 is 50, key is test_row_0/B:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:33,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742278_1454 (size=12151) 2024-12-06T10:18:33,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/3bf35279fc7e4d05af0a7c343eb2d242 2024-12-06T10:18:33,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/500df7f35a574de0afd25ea6726c8f48 is 50, key is test_row_0/C:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:33,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742279_1455 (size=12151) 2024-12-06T10:18:33,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/500df7f35a574de0afd25ea6726c8f48 2024-12-06T10:18:33,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/0edf375441ff42828c53d5db6594c339 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339 2024-12-06T10:18:33,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339, entries=200, sequenceid=172, filesize=38.8 K 2024-12-06T10:18:33,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/3bf35279fc7e4d05af0a7c343eb2d242 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3bf35279fc7e4d05af0a7c343eb2d242 2024-12-06T10:18:33,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3bf35279fc7e4d05af0a7c343eb2d242, entries=150, sequenceid=172, filesize=11.9 K 2024-12-06T10:18:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/500df7f35a574de0afd25ea6726c8f48 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/500df7f35a574de0afd25ea6726c8f48 2024-12-06T10:18:33,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/500df7f35a574de0afd25ea6726c8f48, entries=150, sequenceid=172, filesize=11.9 K 2024-12-06T10:18:33,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0a237205d558afc218e72c1705b7c48d in 556ms, sequenceid=172, compaction requested=true 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:18:33,300 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:33,300 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:33,302 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:33,302 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:33,302 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:33,302 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/794bdf2a94a448b494b33aecabab93d8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/560d62365081467cbeb5c351af19e9ff, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/500df7f35a574de0afd25ea6726c8f48] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=35.9 K 2024-12-06T10:18:33,302 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102324 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:33,302 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:33,302 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,303 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6166a50349748668c947c2dc8331b00, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=99.9 K 2024-12-06T10:18:33,303 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,303 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6166a50349748668c947c2dc8331b00, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339] 2024-12-06T10:18:33,303 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 794bdf2a94a448b494b33aecabab93d8, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480308106 2024-12-06T10:18:33,303 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 560d62365081467cbeb5c351af19e9ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733480310748 2024-12-06T10:18:33,303 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6166a50349748668c947c2dc8331b00, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480308106 2024-12-06T10:18:33,304 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 500df7f35a574de0afd25ea6726c8f48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480312425 2024-12-06T10:18:33,304 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ce6669ea95141aea87c775e38510cfd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733480310748 2024-12-06T10:18:33,305 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0edf375441ff42828c53d5db6594c339, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480312425 2024-12-06T10:18:33,322 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#389 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:33,323 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/6e14c7f1826042f09a9b0cdb0a4ed180 is 50, key is test_row_0/C:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:33,328 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:33,331 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206cb9e1986e59c4b58bab79260d3dcea2f_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:33,333 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206cb9e1986e59c4b58bab79260d3dcea2f_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:33,334 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206cb9e1986e59c4b58bab79260d3dcea2f_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:33,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T10:18:33,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
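As a note on units, the byte counts and the human-readable sizes in the compaction-selection records agree: ExploringCompactionPolicy reports 36711 bytes for the three selected C-family files and 102324 bytes for the three A-family files, which the follow-up messages round to 35.9 K and 99.9 K (KiB). A trivial cross-check using only the figures quoted above:

public class CompactionSizeCheck {
  public static void main(String[] args) {
    long cFamilyBytes = 36711L;   // size reported for the 3 selected C-family HFiles
    long aFamilyBytes = 102324L;  // size reported for the 3 selected A-family HFiles

    // 36711 / 1024 is about 35.9 and 102324 / 1024 is about 99.9, matching the totalSize lines above.
    System.out.printf("C total: %.1f K%n", cFamilyBytes / 1024.0);
    System.out.printf("A total: %.1f K%n", aFamilyBytes / 1024.0);
  }
}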
2024-12-06T10:18:33,346 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:33,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742280_1456 (size=12561) 2024-12-06T10:18:33,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742281_1457 (size=4469) 2024-12-06T10:18:33,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d0596a9203ca42e8b6e8657f37316e69_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480312790/Put/seqid=0 2024-12-06T10:18:33,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:33,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:33,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742282_1458 (size=12304) 2024-12-06T10:18:33,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:33,442 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d0596a9203ca42e8b6e8657f37316e69_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d0596a9203ca42e8b6e8657f37316e69_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e86d9f85a826411799219b878d5bf9bb, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:33,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e86d9f85a826411799219b878d5bf9bb is 175, key is test_row_0/A:col10/1733480312790/Put/seqid=0 2024-12-06T10:18:33,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480373453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480373456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480373460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742283_1459 (size=31105) 2024-12-06T10:18:33,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480373564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480373564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480373571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,773 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/6e14c7f1826042f09a9b0cdb0a4ed180 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e14c7f1826042f09a9b0cdb0a4ed180 2024-12-06T10:18:33,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480373771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480373771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,778 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 6e14c7f1826042f09a9b0cdb0a4ed180(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:33,779 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:33,779 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480313300; duration=0sec 2024-12-06T10:18:33,779 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:33,779 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:33,779 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:33,780 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:33,780 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:33,780 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:33,780 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/62e2b8ed72d0480995e27bc5ac4c92f0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d161d12d2d964ee39be16db7c4802375, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3bf35279fc7e4d05af0a7c343eb2d242] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=35.9 K 2024-12-06T10:18:33,781 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 62e2b8ed72d0480995e27bc5ac4c92f0, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480308106 2024-12-06T10:18:33,781 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d161d12d2d964ee39be16db7c4802375, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733480310748 2024-12-06T10:18:33,783 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bf35279fc7e4d05af0a7c343eb2d242, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480312425 2024-12-06T10:18:33,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480373779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:33,793 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#390 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:33,794 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/fa91e4bc7549434dafee949bc33a35fa is 175, key is test_row_0/A:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:33,801 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:33,801 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/20ef227323754aa4967559eafa504aea is 50, key is test_row_0/B:col10/1733480312425/Put/seqid=0 2024-12-06T10:18:33,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742284_1460 (size=31515) 2024-12-06T10:18:33,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742285_1461 (size=12561) 2024-12-06T10:18:33,834 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/20ef227323754aa4967559eafa504aea as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/20ef227323754aa4967559eafa504aea 2024-12-06T10:18:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T10:18:33,838 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into 20ef227323754aa4967559eafa504aea(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
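After the B and C minor compactions complete, each store should be left with a single roughly 12.3 K HFile (20ef227323754aa4967559eafa504aea and 6e14c7f1826042f09a9b0cdb0a4ed180), the compacted-away inputs being moved to the archive once nothing references them. A hypothetical way to confirm that against the same mini-cluster HDFS, reusing the store path printed in the log (the namenode port and data root are specific to this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Store path copied from the log above.
    Path storeDir = new Path("hdfs://localhost:40601/user/jenkins/test-data/"
        + "d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/"
        + "0a237205d558afc218e72c1705b7c48d/B");
    try (FileSystem fs = storeDir.getFileSystem(conf)) {
      for (FileStatus f : fs.listStatus(storeDir)) {
        // Once the three inputs have been archived, only the compaction output
        // 20ef227323754aa4967559eafa504aea (about 12.3 K) should remain here.
        System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
      }
    }
  }
}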
2024-12-06T10:18:33,838 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:33,838 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480313300; duration=0sec 2024-12-06T10:18:33,838 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:33,838 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:33,881 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e86d9f85a826411799219b878d5bf9bb 2024-12-06T10:18:33,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/662fabc51125429d93e6b9ead1a439fe is 50, key is test_row_0/B:col10/1733480312790/Put/seqid=0 2024-12-06T10:18:33,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742286_1462 (size=12151) 2024-12-06T10:18:33,899 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/662fabc51125429d93e6b9ead1a439fe 2024-12-06T10:18:33,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/d5b10eeb4b944763a80a6d5f49f31064 is 50, key is test_row_0/C:col10/1733480312790/Put/seqid=0 2024-12-06T10:18:33,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742287_1463 (size=12151) 2024-12-06T10:18:34,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480374078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480374078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480374086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,217 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/fa91e4bc7549434dafee949bc33a35fa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/fa91e4bc7549434dafee949bc33a35fa 2024-12-06T10:18:34,222 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into fa91e4bc7549434dafee949bc33a35fa(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:34,222 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:34,222 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480313300; duration=0sec 2024-12-06T10:18:34,222 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:34,222 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:34,312 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/d5b10eeb4b944763a80a6d5f49f31064 2024-12-06T10:18:34,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e86d9f85a826411799219b878d5bf9bb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb 2024-12-06T10:18:34,321 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb, entries=150, sequenceid=194, filesize=30.4 K 2024-12-06T10:18:34,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/662fabc51125429d93e6b9ead1a439fe as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/662fabc51125429d93e6b9ead1a439fe 2024-12-06T10:18:34,327 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/662fabc51125429d93e6b9ead1a439fe, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T10:18:34,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/d5b10eeb4b944763a80a6d5f49f31064 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/d5b10eeb4b944763a80a6d5f49f31064 2024-12-06T10:18:34,331 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/d5b10eeb4b944763a80a6d5f49f31064, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T10:18:34,332 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0a237205d558afc218e72c1705b7c48d in 986ms, sequenceid=194, compaction requested=false 2024-12-06T10:18:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-06T10:18:34,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-06T10:18:34,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-06T10:18:34,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6030 sec 2024-12-06T10:18:34,337 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.6080 sec 2024-12-06T10:18:34,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:34,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:18:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:34,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:34,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:34,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:34,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:34,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:34,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206315212ffe14e403e8657d4255d5197fe_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:34,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742288_1464 (size=17284) 2024-12-06T10:18:34,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480374623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480374624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480374628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480374733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480374733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480374734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T10:18:34,837 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-06T10:18:34,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-06T10:18:34,840 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:34,842 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:34,842 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T10:18:34,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480374918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,922 DEBUG [Thread-1811 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., hostname=552d6a33fa09,33397,1733480204743, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:34,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480374927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,931 DEBUG [Thread-1813 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., hostname=552d6a33fa09,33397,1733480204743, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:34,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480374940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T10:18:34,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480374941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480374941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:34,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T10:18:34,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:34,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:34,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:34,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:34,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
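pid=115 above is a master-driven flush procedure: the master dispatches a FlushRegionCallable to the region server, the callable fails with "Unable to complete flush" because the region is already flushing, and the master re-dispatches it until the in-flight flush finishes. A flush of this kind can be requested through the Admin API; a minimal sketch under assumed connection settings (not the test's own code):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a flush procedure and sends FlushRegionCallable
      // calls to the region servers hosting the table's regions, as seen in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```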
2024-12-06T10:18:34,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
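The 512.0 K blocking limit is far below HBase's defaults; it is the per-region flush size multiplied by the blocking multiplier, both of which this test presumably lowers to force frequent flushes. A sketch of the two relevant settings (the key names are standard HBase configuration; the concrete values are assumptions chosen only to reproduce a 512 K limit):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and reject further writes with RegionTooBusyException at 4x that size = 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
```

Once the memstore exceeds the flush size a flush is queued; once it exceeds flush size times block multiplier, writes are rejected with RegionTooBusyException, which is exactly the pattern repeating in the log above.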
2024-12-06T10:18:35,010 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:35,014 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206315212ffe14e403e8657d4255d5197fe_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206315212ffe14e403e8657d4255d5197fe_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:35,015 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/783219202240414ea068d6c1a8b5c3c4, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:35,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/783219202240414ea068d6c1a8b5c3c4 is 175, key is test_row_0/A:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:35,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742289_1465 (size=48389) 2024-12-06T10:18:35,026 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/783219202240414ea068d6c1a8b5c3c4 2024-12-06T10:18:35,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b1c1ca14321342768f40166190c38dad is 50, key is test_row_0/B:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:35,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742290_1466 (size=12151) 2024-12-06T10:18:35,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b1c1ca14321342768f40166190c38dad 2024-12-06T10:18:35,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/35fab34a76d14c4b9c8b8c86e05a63b8 is 50, key is test_row_0/C:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:35,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742291_1467 (size=12151) 2024-12-06T10:18:35,144 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T10:18:35,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T10:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480375245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480375245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480375246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,304 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T10:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T10:18:35,457 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T10:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:35,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:35,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/35fab34a76d14c4b9c8b8c86e05a63b8 2024-12-06T10:18:35,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/783219202240414ea068d6c1a8b5c3c4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4 2024-12-06T10:18:35,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4, entries=250, sequenceid=213, filesize=47.3 K 2024-12-06T10:18:35,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b1c1ca14321342768f40166190c38dad as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1c1ca14321342768f40166190c38dad 2024-12-06T10:18:35,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1c1ca14321342768f40166190c38dad, entries=150, sequenceid=213, filesize=11.9 K 2024-12-06T10:18:35,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/35fab34a76d14c4b9c8b8c86e05a63b8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/35fab34a76d14c4b9c8b8c86e05a63b8 2024-12-06T10:18:35,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/35fab34a76d14c4b9c8b8c86e05a63b8, entries=150, sequenceid=213, filesize=11.9 K 2024-12-06T10:18:35,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 0a237205d558afc218e72c1705b7c48d in 909ms, sequenceid=213, compaction requested=true 2024-12-06T10:18:35,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:35,498 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:35,498 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:35,500 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111009 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:35,500 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:35,500 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,500 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/fa91e4bc7549434dafee949bc33a35fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=108.4 K 2024-12-06T10:18:35,500 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
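The "selected 3 files ... with 1 in ratio" line is ExploringCompactionPolicy deciding that the three A-family store files form a valid candidate set: no file may be much larger than the combined size of the others. A simplified, standalone sketch of that size-ratio test (not the actual HBase implementation; the 1.2 ratio is the documented default, and the two older file sizes are approximated so the total matches the 111009 bytes reported above):

```java
import java.util.List;

public class CompactionRatioSketch {
  // Simplified "in ratio" test: each candidate file must not be larger than
  // ratio * (combined size of the other files in the selection).
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the A-family selection above: 30.8 K + 30.4 K + 47.3 K = 111009 bytes in total.
    List<Long> sizes = List.of(31539L, 31081L, 48389L);
    System.out.println(filesInRatio(sizes, 1.2)); // true -> all 3 files compacted together
  }
}
```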
2024-12-06T10:18:35,500 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/fa91e4bc7549434dafee949bc33a35fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4] 2024-12-06T10:18:35,504 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:35,504 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa91e4bc7549434dafee949bc33a35fa, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480312425 2024-12-06T10:18:35,504 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:35,504 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,504 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/20ef227323754aa4967559eafa504aea, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/662fabc51125429d93e6b9ead1a439fe, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1c1ca14321342768f40166190c38dad] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.0 K 2024-12-06T10:18:35,504 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e86d9f85a826411799219b878d5bf9bb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733480312780 2024-12-06T10:18:35,504 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 20ef227323754aa4967559eafa504aea, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480312425 2024-12-06T10:18:35,505 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 783219202240414ea068d6c1a8b5c3c4, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480313429 2024-12-06T10:18:35,505 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 662fabc51125429d93e6b9ead1a439fe, 
keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733480312780 2024-12-06T10:18:35,505 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b1c1ca14321342768f40166190c38dad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480313454 2024-12-06T10:18:35,512 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:35,518 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:35,519 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/a4945403e6a4453c8ea7b7637f1a1cee is 50, key is test_row_0/B:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:35,532 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206321b8e3ba2c3413a846a0581dbad382e_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:35,534 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206321b8e3ba2c3413a846a0581dbad382e_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:35,535 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206321b8e3ba2c3413a846a0581dbad382e_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742292_1468 (size=12663) 2024-12-06T10:18:35,568 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/a4945403e6a4453c8ea7b7637f1a1cee as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/a4945403e6a4453c8ea7b7637f1a1cee 2024-12-06T10:18:35,576 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into a4945403e6a4453c8ea7b7637f1a1cee(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
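The HMobStore and DefaultMobStoreCompactor entries appear because column family A is MOB-enabled: oversized cells are written into separate MOB files, and here the compactor aborts its MOB writer because none of the compacted cells crossed the threshold. Declaring such a family for a fresh table looks roughly like the following sketch (the 100 KB threshold and the table-creation call are assumptions for illustration, not the test's actual schema setup):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)           // write large cells into separate MOB files
          .setMobThreshold(100 * 1024L)  // cells above ~100 KB go to the MOB area (assumed value)
          .build());
      admin.createTable(table.build());
    }
  }
}
```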
2024-12-06T10:18:35,576 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:35,576 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480315498; duration=0sec 2024-12-06T10:18:35,576 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:35,576 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:35,577 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:35,578 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:35,578 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:35,578 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,578 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e14c7f1826042f09a9b0cdb0a4ed180, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/d5b10eeb4b944763a80a6d5f49f31064, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/35fab34a76d14c4b9c8b8c86e05a63b8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.0 K 2024-12-06T10:18:35,578 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e14c7f1826042f09a9b0cdb0a4ed180, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733480312425 2024-12-06T10:18:35,578 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d5b10eeb4b944763a80a6d5f49f31064, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733480312780 2024-12-06T10:18:35,579 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 35fab34a76d14c4b9c8b8c86e05a63b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480313454 2024-12-06T10:18:35,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 
is added to blk_1073742293_1469 (size=4469) 2024-12-06T10:18:35,598 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#400 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:35,599 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/6e96852b9e364f46a36c28c075dde785 is 50, key is test_row_0/C:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:35,610 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T10:18:35,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:35,611 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T10:18:35,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:35,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:35,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:35,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:35,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:35,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:35,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412066798a1f5078a4e7daffdc0633e310a2d_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480314618/Put/seqid=0 2024-12-06T10:18:35,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742294_1470 (size=12663) 2024-12-06T10:18:35,654 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742295_1471 (size=12304) 2024-12-06T10:18:35,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:35,660 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412066798a1f5078a4e7daffdc0633e310a2d_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412066798a1f5078a4e7daffdc0633e310a2d_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:35,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/aee8450be2c04ba2a4eca27ba844fc96, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/aee8450be2c04ba2a4eca27ba844fc96 is 175, key is test_row_0/A:col10/1733480314618/Put/seqid=0 2024-12-06T10:18:35,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742296_1472 (size=31105) 2024-12-06T10:18:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:35,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:35,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480375779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480375783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480375785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480375886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480375892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480375892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T10:18:35,983 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#398 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:35,984 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/0ffb75d39726404fa8029068f3efcc72 is 175, key is test_row_0/A:col10/1733480314588/Put/seqid=0 2024-12-06T10:18:35,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742297_1473 (size=31617) 2024-12-06T10:18:36,026 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/6e96852b9e364f46a36c28c075dde785 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e96852b9e364f46a36c28c075dde785 2024-12-06T10:18:36,030 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 6e96852b9e364f46a36c28c075dde785(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:36,030 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:36,030 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480315498; duration=0sec 2024-12-06T10:18:36,030 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:36,030 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:36,083 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/aee8450be2c04ba2a4eca27ba844fc96 2024-12-06T10:18:36,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/626a4f51a1d648fc8d4a0c04266f79e1 is 50, key is test_row_0/B:col10/1733480314618/Put/seqid=0 2024-12-06T10:18:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742298_1474 (size=12151) 2024-12-06T10:18:36,095 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/626a4f51a1d648fc8d4a0c04266f79e1 2024-12-06T10:18:36,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480376094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/cb3ccfb8929d4960b6da3358e6559f83 is 50, key is test_row_0/C:col10/1733480314618/Put/seqid=0 2024-12-06T10:18:36,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480376106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480376107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742299_1475 (size=12151) 2024-12-06T10:18:36,392 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/0ffb75d39726404fa8029068f3efcc72 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0ffb75d39726404fa8029068f3efcc72 2024-12-06T10:18:36,396 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into 0ffb75d39726404fa8029068f3efcc72(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:36,396 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:36,396 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480315498; duration=0sec 2024-12-06T10:18:36,396 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:36,396 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:36,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480376400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480376412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480376412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,514 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/cb3ccfb8929d4960b6da3358e6559f83 2024-12-06T10:18:36,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/aee8450be2c04ba2a4eca27ba844fc96 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96 2024-12-06T10:18:36,529 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96, entries=150, sequenceid=234, filesize=30.4 K 2024-12-06T10:18:36,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/626a4f51a1d648fc8d4a0c04266f79e1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/626a4f51a1d648fc8d4a0c04266f79e1 2024-12-06T10:18:36,533 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/626a4f51a1d648fc8d4a0c04266f79e1, entries=150, sequenceid=234, filesize=11.9 K 2024-12-06T10:18:36,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/cb3ccfb8929d4960b6da3358e6559f83 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/cb3ccfb8929d4960b6da3358e6559f83 2024-12-06T10:18:36,538 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/cb3ccfb8929d4960b6da3358e6559f83, entries=150, sequenceid=234, filesize=11.9 K 2024-12-06T10:18:36,539 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 0a237205d558afc218e72c1705b7c48d in 929ms, sequenceid=234, compaction requested=false 2024-12-06T10:18:36,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:36,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:36,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-06T10:18:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-06T10:18:36,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-06T10:18:36,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6990 sec 2024-12-06T10:18:36,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.7040 sec 2024-12-06T10:18:36,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:36,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-06T10:18:36,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:36,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206718264efe8d04d15b40e507635b8c656_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:36,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742300_1476 (size=14794) 2024-12-06T10:18:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T10:18:36,948 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-06T10:18:36,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:36,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-06T10:18:36,950 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:36,950 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:36,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:36,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480376944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480376946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:36,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:36,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480376951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:37,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480377053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480377053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480377057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:37,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:37,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:37,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:37,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480377257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480377258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480377264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,326 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:37,330 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206718264efe8d04d15b40e507635b8c656_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206718264efe8d04d15b40e507635b8c656_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:37,331 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/c2db1dd4daf645499eb818d5d8d2e6ee, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:37,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/c2db1dd4daf645499eb818d5d8d2e6ee is 175, key is test_row_0/A:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:37,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742301_1477 (size=39749) 2024-12-06T10:18:37,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:37,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:37,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:37,560 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:37,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:37,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480377564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480377564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:37,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480377570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,713 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:37,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:37,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,741 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/c2db1dd4daf645499eb818d5d8d2e6ee 2024-12-06T10:18:37,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/1b0d63a6e6c046e5911cab0d69ea7db6 is 50, key is test_row_0/B:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:37,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742302_1478 (size=12151) 2024-12-06T10:18:37,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/1b0d63a6e6c046e5911cab0d69ea7db6 2024-12-06T10:18:37,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/5dc675390e6e4036b185a4f1379a694a is 50, key is test_row_0/C:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:37,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742303_1479 (size=12151) 2024-12-06T10:18:37,866 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:37,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:37,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:37,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:37,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,019 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:38,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:38,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:38,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:38,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:38,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:38,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480378069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:38,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480378070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:38,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480378080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,171 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:38,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:38,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:38,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:38,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:38,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/5dc675390e6e4036b185a4f1379a694a 2024-12-06T10:18:38,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/c2db1dd4daf645499eb818d5d8d2e6ee as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee 2024-12-06T10:18:38,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee, entries=200, sequenceid=253, filesize=38.8 K 2024-12-06T10:18:38,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/1b0d63a6e6c046e5911cab0d69ea7db6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1b0d63a6e6c046e5911cab0d69ea7db6 2024-12-06T10:18:38,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1b0d63a6e6c046e5911cab0d69ea7db6, entries=150, sequenceid=253, filesize=11.9 K 2024-12-06T10:18:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/5dc675390e6e4036b185a4f1379a694a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/5dc675390e6e4036b185a4f1379a694a 2024-12-06T10:18:38,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/5dc675390e6e4036b185a4f1379a694a, entries=150, sequenceid=253, filesize=11.9 K 2024-12-06T10:18:38,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 0a237205d558afc218e72c1705b7c48d in 1293ms, sequenceid=253, compaction requested=true 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:38,206 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:38,206 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:38,207 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:38,207 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:38,207 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:38,207 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:38,207 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:38,207 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0ffb75d39726404fa8029068f3efcc72, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=100.1 K 2024-12-06T10:18:38,207 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:38,207 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0ffb75d39726404fa8029068f3efcc72, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee] 2024-12-06T10:18:38,207 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:38,207 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/a4945403e6a4453c8ea7b7637f1a1cee, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/626a4f51a1d648fc8d4a0c04266f79e1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1b0d63a6e6c046e5911cab0d69ea7db6] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.1 K 2024-12-06T10:18:38,207 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ffb75d39726404fa8029068f3efcc72, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480313454 2024-12-06T10:18:38,208 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a4945403e6a4453c8ea7b7637f1a1cee, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480313454 2024-12-06T10:18:38,208 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 626a4f51a1d648fc8d4a0c04266f79e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733480314618 2024-12-06T10:18:38,208 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting aee8450be2c04ba2a4eca27ba844fc96, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733480314618 2024-12-06T10:18:38,208 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2db1dd4daf645499eb818d5d8d2e6ee, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480315770 2024-12-06T10:18:38,208 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b0d63a6e6c046e5911cab0d69ea7db6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480315770 2024-12-06T10:18:38,214 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:38,215 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#408 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:38,216 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/cc2580b982ef47639486728bda1005eb is 50, key is test_row_0/B:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:38,218 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120662cfa859e9ba41ccbaed9c36fac52811_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:38,219 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120662cfa859e9ba41ccbaed9c36fac52811_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:38,219 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120662cfa859e9ba41ccbaed9c36fac52811_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742304_1480 (size=12765) 2024-12-06T10:18:38,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742305_1481 (size=4469) 2024-12-06T10:18:38,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T10:18:38,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:38,325 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-06T10:18:38,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:38,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:38,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:38,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:38,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:38,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:38,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206312d879309c24854910a569627b1e000_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480316939/Put/seqid=0 2024-12-06T10:18:38,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742306_1482 (size=12454) 2024-12-06T10:18:38,639 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/cc2580b982ef47639486728bda1005eb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/cc2580b982ef47639486728bda1005eb 2024-12-06T10:18:38,647 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#407 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:38,648 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e00e3057332b479c8d33b5ee9d7d5771 is 175, key is test_row_0/A:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:38,648 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into cc2580b982ef47639486728bda1005eb(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:38,648 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:38,648 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480318206; duration=0sec 2024-12-06T10:18:38,648 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:38,648 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:38,649 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:38,650 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:38,650 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:38,650 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:38,650 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e96852b9e364f46a36c28c075dde785, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/cb3ccfb8929d4960b6da3358e6559f83, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/5dc675390e6e4036b185a4f1379a694a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.1 K 2024-12-06T10:18:38,651 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e96852b9e364f46a36c28c075dde785, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733480313454 2024-12-06T10:18:38,651 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cb3ccfb8929d4960b6da3358e6559f83, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733480314618 2024-12-06T10:18:38,651 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dc675390e6e4036b185a4f1379a694a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480315770 2024-12-06T10:18:38,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742307_1483 (size=31719) 2024-12-06T10:18:38,656 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e00e3057332b479c8d33b5ee9d7d5771 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e00e3057332b479c8d33b5ee9d7d5771 2024-12-06T10:18:38,660 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#410 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:38,660 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/fbe6f3d61f424f279cb0082bb58df4c6 is 50, key is test_row_0/C:col10/1733480315770/Put/seqid=0 2024-12-06T10:18:38,662 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into e00e3057332b479c8d33b5ee9d7d5771(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:38,662 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:38,662 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480318206; duration=0sec 2024-12-06T10:18:38,662 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:38,662 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:38,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742308_1484 (size=12765) 2024-12-06T10:18:38,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:38,750 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206312d879309c24854910a569627b1e000_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206312d879309c24854910a569627b1e000_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:38,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/a67e947d0b05464aa3e8fdfc60f632d9, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:38,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/a67e947d0b05464aa3e8fdfc60f632d9 is 175, key is test_row_0/A:col10/1733480316939/Put/seqid=0 2024-12-06T10:18:38,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742309_1485 (size=31255) 2024-12-06T10:18:38,758 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/a67e947d0b05464aa3e8fdfc60f632d9 2024-12-06T10:18:38,765 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/535b67126e8b48628228c3a014b70754 is 50, key is test_row_0/B:col10/1733480316939/Put/seqid=0 2024-12-06T10:18:38,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742310_1486 (size=12301) 2024-12-06T10:18:38,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:38,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480378981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:38,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480378982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:39,069 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/fbe6f3d61f424f279cb0082bb58df4c6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fbe6f3d61f424f279cb0082bb58df4c6 2024-12-06T10:18:39,073 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into fbe6f3d61f424f279cb0082bb58df4c6(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:39,073 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:39,073 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480318206; duration=0sec 2024-12-06T10:18:39,074 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:39,074 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:39,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480379076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480379080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480379086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480379091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480379093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,169 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/535b67126e8b48628228c3a014b70754 2024-12-06T10:18:39,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/b2f468b415f34478a9441680107ead08 is 50, key is test_row_0/C:col10/1733480316939/Put/seqid=0 2024-12-06T10:18:39,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742311_1487 (size=12301) 2024-12-06T10:18:39,181 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/b2f468b415f34478a9441680107ead08 2024-12-06T10:18:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/a67e947d0b05464aa3e8fdfc60f632d9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9 2024-12-06T10:18:39,188 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9, entries=150, sequenceid=272, filesize=30.5 K 2024-12-06T10:18:39,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/535b67126e8b48628228c3a014b70754 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/535b67126e8b48628228c3a014b70754 2024-12-06T10:18:39,192 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/535b67126e8b48628228c3a014b70754, entries=150, sequenceid=272, filesize=12.0 K 2024-12-06T10:18:39,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/b2f468b415f34478a9441680107ead08 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b2f468b415f34478a9441680107ead08 2024-12-06T10:18:39,196 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b2f468b415f34478a9441680107ead08, entries=150, sequenceid=272, filesize=12.0 K 2024-12-06T10:18:39,197 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 0a237205d558afc218e72c1705b7c48d in 872ms, sequenceid=272, compaction requested=false 2024-12-06T10:18:39,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:39,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:39,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-06T10:18:39,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-06T10:18:39,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-06T10:18:39,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2490 sec 2024-12-06T10:18:39,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 2.2510 sec 2024-12-06T10:18:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:39,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-06T10:18:39,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:39,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:39,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:39,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:39,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:39,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:39,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065c01c7407b41405e95104bfde5c4964f_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:39,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742312_1488 (size=12454) 2024-12-06T10:18:39,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480379365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480379365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480379471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480379471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480379674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480379674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,723 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:39,727 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065c01c7407b41405e95104bfde5c4964f_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065c01c7407b41405e95104bfde5c4964f_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:39,729 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/1d817db982704c2c992ff5763db74ee8, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:39,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/1d817db982704c2c992ff5763db74ee8 is 175, key is test_row_0/A:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:39,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742313_1489 (size=31255) 2024-12-06T10:18:39,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480379982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:39,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480379984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:40,150 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/1d817db982704c2c992ff5763db74ee8 2024-12-06T10:18:40,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/691c0a3685b541d1ab1a5829b8f1b70e is 50, key is test_row_0/B:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:40,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742314_1490 (size=12301) 2024-12-06T10:18:40,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:40,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480380488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:40,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:40,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480380490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:40,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/691c0a3685b541d1ab1a5829b8f1b70e 2024-12-06T10:18:40,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/e86f7e2132d4400190355b05631eeca2 is 50, key is test_row_0/C:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:40,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742315_1491 (size=12301) 2024-12-06T10:18:40,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/e86f7e2132d4400190355b05631eeca2 2024-12-06T10:18:40,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/1d817db982704c2c992ff5763db74ee8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8 2024-12-06T10:18:40,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8, entries=150, sequenceid=293, filesize=30.5 K 2024-12-06T10:18:40,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/691c0a3685b541d1ab1a5829b8f1b70e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/691c0a3685b541d1ab1a5829b8f1b70e 2024-12-06T10:18:40,617 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/691c0a3685b541d1ab1a5829b8f1b70e, entries=150, sequenceid=293, filesize=12.0 K 2024-12-06T10:18:40,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/e86f7e2132d4400190355b05631eeca2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e86f7e2132d4400190355b05631eeca2 2024-12-06T10:18:40,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e86f7e2132d4400190355b05631eeca2, entries=150, sequenceid=293, filesize=12.0 K 2024-12-06T10:18:40,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 0a237205d558afc218e72c1705b7c48d in 1329ms, sequenceid=293, compaction requested=true 2024-12-06T10:18:40,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:40,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:40,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:40,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:40,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:40,623 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:40,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:40,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:18:40,623 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:40,623 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:40,624 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 
0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:40,624 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:40,624 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e00e3057332b479c8d33b5ee9d7d5771, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=92.0 K 2024-12-06T10:18:40,624 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:40,624 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e00e3057332b479c8d33b5ee9d7d5771, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8] 2024-12-06T10:18:40,624 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e00e3057332b479c8d33b5ee9d7d5771, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480315770 2024-12-06T10:18:40,624 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:40,624 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:40,625 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
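For context on the compaction selection above (3 eligible store files, "16 blocking"): the trigger points are store-level settings. The sketch below is a minimal, hedged illustration using the stock HBase property names; the concrete values are assumptions chosen to match what the log reports, not the values this test necessarily uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered
        // (the selection above fired once 3 files were eligible).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files picked into a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes are blocked
        // (matches the "16 blocking" reported by SortedCompactionPolicy above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }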
2024-12-06T10:18:40,625 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/cc2580b982ef47639486728bda1005eb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/535b67126e8b48628228c3a014b70754, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/691c0a3685b541d1ab1a5829b8f1b70e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.5 K 2024-12-06T10:18:40,625 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting a67e947d0b05464aa3e8fdfc60f632d9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733480316936 2024-12-06T10:18:40,625 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting cc2580b982ef47639486728bda1005eb, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480315770 2024-12-06T10:18:40,625 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d817db982704c2c992ff5763db74ee8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733480318980 2024-12-06T10:18:40,625 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 535b67126e8b48628228c3a014b70754, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733480316936 2024-12-06T10:18:40,625 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 691c0a3685b541d1ab1a5829b8f1b70e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733480318980 2024-12-06T10:18:40,631 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:40,633 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:40,634 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/df59ae7c752643c192593d6af6c0927f is 50, key is test_row_0/B:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:40,637 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206aaa13b3c277b45118619a6087803d873_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:40,638 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206aaa13b3c277b45118619a6087803d873_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:40,638 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206aaa13b3c277b45118619a6087803d873_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:40,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742316_1492 (size=13017) 2024-12-06T10:18:40,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742317_1493 (size=4469) 2024-12-06T10:18:41,045 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/df59ae7c752643c192593d6af6c0927f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/df59ae7c752643c192593d6af6c0927f 2024-12-06T10:18:41,049 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#416 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:41,049 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into df59ae7c752643c192593d6af6c0927f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:41,049 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:41,049 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480320622; duration=0sec 2024-12-06T10:18:41,049 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:41,049 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:41,049 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:41,050 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/ef08b294db7d4eda8438e88df7536d54 is 175, key is test_row_0/A:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:41,050 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:41,050 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:41,050 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:41,050 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fbe6f3d61f424f279cb0082bb58df4c6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b2f468b415f34478a9441680107ead08, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e86f7e2132d4400190355b05631eeca2] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.5 K 2024-12-06T10:18:41,051 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting fbe6f3d61f424f279cb0082bb58df4c6, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480315770 2024-12-06T10:18:41,051 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b2f468b415f34478a9441680107ead08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733480316936 2024-12-06T10:18:41,051 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e86f7e2132d4400190355b05631eeca2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733480318980 2024-12-06T10:18:41,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742318_1494 (size=31971) 2024-12-06T10:18:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T10:18:41,056 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-06T10:18:41,061 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/ef08b294db7d4eda8438e88df7536d54 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/ef08b294db7d4eda8438e88df7536d54 2024-12-06T10:18:41,061 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#418 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:41,061 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/9ce62a3633ba4bb1a9d6d30c53aa690c is 50, key is test_row_0/C:col10/1733480319292/Put/seqid=0 2024-12-06T10:18:41,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:41,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-06T10:18:41,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T10:18:41,063 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:41,064 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:41,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:41,068 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into ef08b294db7d4eda8438e88df7536d54(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
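The pid=118 FlushTableProcedure above is created in response to a client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of issuing the same request through the public Admin API, with the table name taken from the log and the connection setup assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to run a flush-table procedure for every region of the table,
          // which is what produces the FlushTableProcedure/FlushRegionProcedure entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }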
2024-12-06T10:18:41,068 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:41,068 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480320622; duration=0sec 2024-12-06T10:18:41,069 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:41,069 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:41,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742319_1495 (size=13017) 2024-12-06T10:18:41,073 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/9ce62a3633ba4bb1a9d6d30c53aa690c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/9ce62a3633ba4bb1a9d6d30c53aa690c 2024-12-06T10:18:41,077 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 9ce62a3633ba4bb1a9d6d30c53aa690c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
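At this point all three stores (A, B and C) of region 0a237205d558afc218e72c1705b7c48d have been compacted back to a single file each by the automatic minor compactions. The same work can also be requested explicitly; a hedged sketch using the Admin API (connection setup assumed, table and family names taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction of a single column family ...
          admin.compact(table, Bytes.toBytes("A"));
          // ... or a major compaction of the whole table, which rewrites every store file.
          admin.majorCompact(table);
        }
      }
    }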
2024-12-06T10:18:41,077 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:41,077 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480320623; duration=0sec 2024-12-06T10:18:41,077 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:41,077 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:41,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T10:18:41,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:41,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:41,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:41,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:41,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:41,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:41,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ee774b3c4daa4227a91eabefa8397873_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480321086/Put/seqid=0 2024-12-06T10:18:41,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742320_1496 (size=14994) 2024-12-06T10:18:41,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480381116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480381121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480381123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T10:18:41,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:41,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:41,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480381224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480381224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480381227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T10:18:41,369 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:41,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:41,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:41,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480381428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480381429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480381429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,499 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:41,503 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ee774b3c4daa4227a91eabefa8397873_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ee774b3c4daa4227a91eabefa8397873_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:41,503 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/232d64516dcb407a838e66d666a08770, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:41,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/232d64516dcb407a838e66d666a08770 is 175, key is test_row_0/A:col10/1733480321086/Put/seqid=0 2024-12-06T10:18:41,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480381500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480381503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742321_1497 (size=39949) 2024-12-06T10:18:41,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:41,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T10:18:41,674 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:41,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:41,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480381732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480381735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:41,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480381735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:41,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:41,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:41,908 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/232d64516dcb407a838e66d666a08770 2024-12-06T10:18:41,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/1c546b28cc6441eda20faa18e01392bf is 50, key is test_row_0/B:col10/1733480321086/Put/seqid=0 2024-12-06T10:18:41,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742322_1498 (size=12301) 2024-12-06T10:18:41,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:41,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:41,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:41,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:41,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T10:18:42,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480382236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480382240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480382245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,285 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/1c546b28cc6441eda20faa18e01392bf 2024-12-06T10:18:42,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/e970362747124a67b0d76585f54df470 is 50, key is test_row_0/C:col10/1733480321086/Put/seqid=0 2024-12-06T10:18:42,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742323_1499 (size=12301) 2024-12-06T10:18:42,437 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/e970362747124a67b0d76585f54df470 2024-12-06T10:18:42,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/232d64516dcb407a838e66d666a08770 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770 2024-12-06T10:18:42,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:42,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770, entries=200, sequenceid=316, filesize=39.0 K 2024-12-06T10:18:42,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/1c546b28cc6441eda20faa18e01392bf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1c546b28cc6441eda20faa18e01392bf 2024-12-06T10:18:42,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1c546b28cc6441eda20faa18e01392bf, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T10:18:42,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/e970362747124a67b0d76585f54df470 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e970362747124a67b0d76585f54df470 2024-12-06T10:18:42,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e970362747124a67b0d76585f54df470, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T10:18:42,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 0a237205d558afc218e72c1705b7c48d in 1669ms, sequenceid=316, compaction requested=false 2024-12-06T10:18:42,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:42,896 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:42,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T10:18:42,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:42,896 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T10:18:42,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:42,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:42,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:42,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:42,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:42,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:42,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206856218747b0441d5911a8586c0dec0aa_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480321119/Put/seqid=0 2024-12-06T10:18:42,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742324_1500 (size=12454) 2024-12-06T10:18:42,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:42,923 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206856218747b0441d5911a8586c0dec0aa_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206856218747b0441d5911a8586c0dec0aa_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:42,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/131354ff43ee4e1e8ad0b48cb20dd2b9, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:42,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/131354ff43ee4e1e8ad0b48cb20dd2b9 is 175, key is test_row_0/A:col10/1733480321119/Put/seqid=0 2024-12-06T10:18:42,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742325_1501 (size=31255) 2024-12-06T10:18:42,929 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/131354ff43ee4e1e8ad0b48cb20dd2b9 2024-12-06T10:18:42,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/12ed812a38ab495eb3d758f69ca0a572 is 50, key is test_row_0/B:col10/1733480321119/Put/seqid=0 2024-12-06T10:18:42,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742326_1502 (size=12301) 2024-12-06T10:18:43,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:18:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T10:18:43,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:43,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:43,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480383279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480383284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480383286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,341 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/12ed812a38ab495eb3d758f69ca0a572 2024-12-06T10:18:43,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/2020b9e60fdf4e868c77516dc25890ad is 50, key is test_row_0/C:col10/1733480321119/Put/seqid=0 2024-12-06T10:18:43,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742327_1503 (size=12301) 2024-12-06T10:18:43,358 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/2020b9e60fdf4e868c77516dc25890ad 2024-12-06T10:18:43,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/131354ff43ee4e1e8ad0b48cb20dd2b9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9 2024-12-06T10:18:43,367 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9, entries=150, sequenceid=332, filesize=30.5 K 2024-12-06T10:18:43,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/12ed812a38ab495eb3d758f69ca0a572 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/12ed812a38ab495eb3d758f69ca0a572 2024-12-06T10:18:43,371 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/12ed812a38ab495eb3d758f69ca0a572, entries=150, sequenceid=332, filesize=12.0 K 2024-12-06T10:18:43,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/2020b9e60fdf4e868c77516dc25890ad as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/2020b9e60fdf4e868c77516dc25890ad 2024-12-06T10:18:43,375 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/2020b9e60fdf4e868c77516dc25890ad, entries=150, sequenceid=332, filesize=12.0 K 2024-12-06T10:18:43,376 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 0a237205d558afc218e72c1705b7c48d in 480ms, sequenceid=332, compaction requested=true 2024-12-06T10:18:43,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:43,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
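The entries above trace pid=119 from repeated "Unable to complete flush" rejections (the region was still busy with the previous flush) through to a completed flush at sequenceid=332, and the next entries show the master marking FlushRegionProcedure pid=119 and FlushTableProcedure pid=118 as SUCCESS. As a minimal orientation sketch only: the client-side call that drives such a table flush in the 2.x Admin API looks roughly like the following. The class name, the explicit retry setting, and reliance on the default HBaseConfiguration/ZooKeeper quorum are illustrative assumptions, not something taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {  // illustrative name, not part of the test
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client retry budget; the log above shows RpcRetryingCallerImpl retrying
        // blocked puts with "tries=6, retries=16" while the region is over its
        // memstore blocking limit (RegionTooBusyException).
        conf.setInt("hbase.client.retries.number", 16);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the master to flush all regions of the table. In the build logged
            // above (2.7.0-SNAPSHOT) this request is visible as a FlushTableProcedure
            // (pid=118) with a per-region FlushRegionProcedure (pid=119).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```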
2024-12-06T10:18:43,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-06T10:18:43,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-06T10:18:43,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-06T10:18:43,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3130 sec 2024-12-06T10:18:43,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.3160 sec 2024-12-06T10:18:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:43,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T10:18:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:43,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d30f46cc468a4d6a87d35d5aad33e651_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742328_1504 (size=17534) 2024-12-06T10:18:43,404 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:43,408 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d30f46cc468a4d6a87d35d5aad33e651_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d30f46cc468a4d6a87d35d5aad33e651_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:43,409 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e45803a8a3bc48029a08256ebf94594c, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:43,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e45803a8a3bc48029a08256ebf94594c is 175, key is test_row_0/A:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742329_1505 (size=48639) 2024-12-06T10:18:43,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480383415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480383418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480383422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50230 deadline: 1733480383513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,523 DEBUG [Thread-1811 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., hostname=552d6a33fa09,33397,1733480204743, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:43,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480383523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50246 deadline: 1733480383525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,531 DEBUG [Thread-1813 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., hostname=552d6a33fa09,33397,1733480204743, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:18:43,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480383527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480383529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480383733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480383734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480383738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:43,815 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=354, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e45803a8a3bc48029a08256ebf94594c 2024-12-06T10:18:43,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b4ec7d87654e4785a188761ef6e45c9c is 50, key is test_row_0/B:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742330_1506 (size=12301) 2024-12-06T10:18:43,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b4ec7d87654e4785a188761ef6e45c9c 2024-12-06T10:18:43,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/fe076c5bea9041d89aed7e7519a587cc is 50, key is test_row_0/C:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742331_1507 (size=12301) 2024-12-06T10:18:43,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/fe076c5bea9041d89aed7e7519a587cc 2024-12-06T10:18:43,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/e45803a8a3bc48029a08256ebf94594c as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c 2024-12-06T10:18:43,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c, entries=250, sequenceid=354, filesize=47.5 K 2024-12-06T10:18:43,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b4ec7d87654e4785a188761ef6e45c9c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b4ec7d87654e4785a188761ef6e45c9c 2024-12-06T10:18:43,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b4ec7d87654e4785a188761ef6e45c9c, entries=150, sequenceid=354, filesize=12.0 K 2024-12-06T10:18:43,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/fe076c5bea9041d89aed7e7519a587cc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fe076c5bea9041d89aed7e7519a587cc 2024-12-06T10:18:43,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fe076c5bea9041d89aed7e7519a587cc, entries=150, sequenceid=354, filesize=12.0 K 2024-12-06T10:18:43,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0a237205d558afc218e72c1705b7c48d in 473ms, sequenceid=354, compaction requested=true 2024-12-06T10:18:43,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:43,864 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:43,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:43,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:43,864 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:43,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction 
store size is 2 2024-12-06T10:18:43,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:43,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:43,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:43,865 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151814 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:43,865 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:43,865 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:43,865 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/ef08b294db7d4eda8438e88df7536d54, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=148.3 K 2024-12-06T10:18:43,865 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:43,865 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/ef08b294db7d4eda8438e88df7536d54, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c] 2024-12-06T10:18:43,865 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:43,865 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:43,865 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:43,865 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/df59ae7c752643c192593d6af6c0927f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1c546b28cc6441eda20faa18e01392bf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/12ed812a38ab495eb3d758f69ca0a572, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b4ec7d87654e4785a188761ef6e45c9c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=48.8 K 2024-12-06T10:18:43,869 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef08b294db7d4eda8438e88df7536d54, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733480318980 2024-12-06T10:18:43,870 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting df59ae7c752643c192593d6af6c0927f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733480318980 2024-12-06T10:18:43,870 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 232d64516dcb407a838e66d666a08770, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733480319333 2024-12-06T10:18:43,870 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c546b28cc6441eda20faa18e01392bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733480319341 2024-12-06T10:18:43,870 
DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 12ed812a38ab495eb3d758f69ca0a572, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733480321109 2024-12-06T10:18:43,870 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 131354ff43ee4e1e8ad0b48cb20dd2b9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733480321109 2024-12-06T10:18:43,871 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b4ec7d87654e4785a188761ef6e45c9c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733480323277 2024-12-06T10:18:43,871 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e45803a8a3bc48029a08256ebf94594c, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733480323277 2024-12-06T10:18:43,881 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:43,883 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#428 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:43,883 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/aaf22d8ba67b4722bab4ece37053cc17 is 50, key is test_row_0/B:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,892 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412062bf6a0d0a2b441788873d12a250223e8_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:43,894 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412062bf6a0d0a2b441788873d12a250223e8_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:43,895 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062bf6a0d0a2b441788873d12a250223e8_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742332_1508 (size=13153) 2024-12-06T10:18:43,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742333_1509 (size=4469) 2024-12-06T10:18:43,906 INFO 
[RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#429 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:43,906 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/35f0f781187e4a1680c65c504afb8c03 is 175, key is test_row_0/A:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742334_1510 (size=32107) 2024-12-06T10:18:43,915 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/35f0f781187e4a1680c65c504afb8c03 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/35f0f781187e4a1680c65c504afb8c03 2024-12-06T10:18:43,918 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into 35f0f781187e4a1680c65c504afb8c03(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:43,919 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:43,919 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=12, startTime=1733480323864; duration=0sec 2024-12-06T10:18:43,919 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:43,919 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:43,919 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:18:43,920 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:18:43,920 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:43,920 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:43,920 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/9ce62a3633ba4bb1a9d6d30c53aa690c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e970362747124a67b0d76585f54df470, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/2020b9e60fdf4e868c77516dc25890ad, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fe076c5bea9041d89aed7e7519a587cc] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=48.8 K 2024-12-06T10:18:43,920 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ce62a3633ba4bb1a9d6d30c53aa690c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733480318980 2024-12-06T10:18:43,920 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e970362747124a67b0d76585f54df470, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733480319341 2024-12-06T10:18:43,921 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2020b9e60fdf4e868c77516dc25890ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733480321109 2024-12-06T10:18:43,921 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe076c5bea9041d89aed7e7519a587cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733480323277 2024-12-06T10:18:43,927 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#430 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:43,928 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/80a06cf164bd4fb7aa55d088f45e1c79 is 50, key is test_row_0/C:col10/1733480323278/Put/seqid=0 2024-12-06T10:18:43,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742335_1511 (size=13153) 2024-12-06T10:18:43,935 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/80a06cf164bd4fb7aa55d088f45e1c79 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/80a06cf164bd4fb7aa55d088f45e1c79 2024-12-06T10:18:43,938 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 80a06cf164bd4fb7aa55d088f45e1c79(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:43,938 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:43,938 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=12, startTime=1733480323865; duration=0sec 2024-12-06T10:18:43,938 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:43,938 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:44,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:44,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:18:44,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:44,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:44,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:44,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:44,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:44,043 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:44,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206fc709d11a4864c3182c901bdc960b045_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480323414/Put/seqid=0 2024-12-06T10:18:44,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742336_1512 (size=14994) 2024-12-06T10:18:44,060 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:44,066 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206fc709d11a4864c3182c901bdc960b045_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206fc709d11a4864c3182c901bdc960b045_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:44,067 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b6b8bece5c3f440887e38cbfc5bd6645, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:44,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b6b8bece5c3f440887e38cbfc5bd6645 is 175, key is test_row_0/A:col10/1733480323414/Put/seqid=0 2024-12-06T10:18:44,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742337_1513 (size=39949) 2024-12-06T10:18:44,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480384076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480384082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480384083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480384184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480384189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480384191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,303 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/aaf22d8ba67b4722bab4ece37053cc17 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/aaf22d8ba67b4722bab4ece37053cc17 2024-12-06T10:18:44,307 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into aaf22d8ba67b4722bab4ece37053cc17(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:44,307 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:44,307 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=12, startTime=1733480323864; duration=0sec 2024-12-06T10:18:44,307 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:44,307 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:44,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480384390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480384395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480384397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,474 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=371, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b6b8bece5c3f440887e38cbfc5bd6645 2024-12-06T10:18:44,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/3cd8225a40da4939807f2cc3ad8a06d5 is 50, key is test_row_0/B:col10/1733480323414/Put/seqid=0 2024-12-06T10:18:44,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742338_1514 (size=12301) 2024-12-06T10:18:44,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/3cd8225a40da4939807f2cc3ad8a06d5 2024-12-06T10:18:44,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/29297e316b5b4938b02f33d0b49947ad is 50, key is test_row_0/C:col10/1733480323414/Put/seqid=0 2024-12-06T10:18:44,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742339_1515 (size=12301) 2024-12-06T10:18:44,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/29297e316b5b4938b02f33d0b49947ad 2024-12-06T10:18:44,570 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/b6b8bece5c3f440887e38cbfc5bd6645 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645 2024-12-06T10:18:44,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645, entries=200, sequenceid=371, filesize=39.0 K 2024-12-06T10:18:44,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/3cd8225a40da4939807f2cc3ad8a06d5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3cd8225a40da4939807f2cc3ad8a06d5 2024-12-06T10:18:44,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3cd8225a40da4939807f2cc3ad8a06d5, entries=150, sequenceid=371, filesize=12.0 K 2024-12-06T10:18:44,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/29297e316b5b4938b02f33d0b49947ad as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/29297e316b5b4938b02f33d0b49947ad 2024-12-06T10:18:44,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/29297e316b5b4938b02f33d0b49947ad, entries=150, sequenceid=371, filesize=12.0 K 2024-12-06T10:18:44,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 0a237205d558afc218e72c1705b7c48d in 546ms, sequenceid=371, compaction requested=false 2024-12-06T10:18:44,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:44,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:18:44,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:44,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:44,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:44,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:44,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:44,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:44,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060f943210e2fc4ee19831f7a75def70ee_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:44,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742340_1516 (size=14994) 2024-12-06T10:18:44,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480384718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480384721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480384722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480384823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480384829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:44,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480384830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480385029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480385037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480385037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,112 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:45,116 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060f943210e2fc4ee19831f7a75def70ee_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060f943210e2fc4ee19831f7a75def70ee_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:45,117 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/10121f7dcd1a4848bb92f33f373a0f44, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:45,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/10121f7dcd1a4848bb92f33f373a0f44 is 175, key is test_row_0/A:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:45,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742341_1517 (size=39949) 2024-12-06T10:18:45,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=118 2024-12-06T10:18:45,169 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-06T10:18:45,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:45,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-06T10:18:45,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:45,173 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:45,173 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:45,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:45,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:45,325 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:45,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:45,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480385337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480385346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480385347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,402 DEBUG [Thread-1822 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:61610 2024-12-06T10:18:45,402 DEBUG [Thread-1822 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:45,403 DEBUG [Thread-1824 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:61610 2024-12-06T10:18:45,403 DEBUG [Thread-1824 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:45,408 DEBUG [Thread-1820 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:61610 2024-12-06T10:18:45,408 DEBUG [Thread-1820 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:45,408 DEBUG [Thread-1818 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:61610 2024-12-06T10:18:45,408 DEBUG [Thread-1818 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:45,410 DEBUG [Thread-1826 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:61610 2024-12-06T10:18:45,410 DEBUG [Thread-1826 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:45,478 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:45,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:45,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:45,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:45,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:45,523 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=394, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/10121f7dcd1a4848bb92f33f373a0f44 2024-12-06T10:18:45,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b401e20bf4cb481db819cf0a841fc922 is 50, key is test_row_0/B:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:45,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742342_1518 (size=12301) 2024-12-06T10:18:45,631 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:45,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:45,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:45,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:45,783 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50198 deadline: 1733480385842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50256 deadline: 1733480385854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733480385856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b401e20bf4cb481db819cf0a841fc922 2024-12-06T10:18:45,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:45,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:45,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:45,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:45,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:45,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/bb0654dae3074850afe508ec737342a2 is 50, key is test_row_0/C:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:45,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742343_1519 (size=12301) 2024-12-06T10:18:46,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:46,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:46,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:46,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:46,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:46,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. as already flushing 2024-12-06T10:18:46,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:46,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:46,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:46,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/bb0654dae3074850afe508ec737342a2 2024-12-06T10:18:46,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/10121f7dcd1a4848bb92f33f373a0f44 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44 2024-12-06T10:18:46,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44, entries=200, sequenceid=394, filesize=39.0 K 2024-12-06T10:18:46,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/b401e20bf4cb481db819cf0a841fc922 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b401e20bf4cb481db819cf0a841fc922 2024-12-06T10:18:46,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b401e20bf4cb481db819cf0a841fc922, entries=150, sequenceid=394, filesize=12.0 K 2024-12-06T10:18:46,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/bb0654dae3074850afe508ec737342a2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/bb0654dae3074850afe508ec737342a2 2024-12-06T10:18:46,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/bb0654dae3074850afe508ec737342a2, entries=150, sequenceid=394, filesize=12.0 K 2024-12-06T10:18:46,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0a237205d558afc218e72c1705b7c48d in 1658ms, sequenceid=394, compaction requested=true 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:A, priority=-2147483648, current under compaction store 
size is 1 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:46,356 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:46,356 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a237205d558afc218e72c1705b7c48d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112005 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/A is initiating minor compaction (all files) 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/B is initiating minor compaction (all files) 2024-12-06T10:18:46,357 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/A in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,357 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/B in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:46,357 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/35f0f781187e4a1680c65c504afb8c03, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=109.4 K 2024-12-06T10:18:46,357 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/aaf22d8ba67b4722bab4ece37053cc17, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3cd8225a40da4939807f2cc3ad8a06d5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b401e20bf4cb481db819cf0a841fc922] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.9 K 2024-12-06T10:18:46,357 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/35f0f781187e4a1680c65c504afb8c03, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44] 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting aaf22d8ba67b4722bab4ece37053cc17, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733480323277 2024-12-06T10:18:46,357 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35f0f781187e4a1680c65c504afb8c03, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733480323277 2024-12-06T10:18:46,358 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cd8225a40da4939807f2cc3ad8a06d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733480323414 2024-12-06T10:18:46,358 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6b8bece5c3f440887e38cbfc5bd6645, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733480323414 2024-12-06T10:18:46,358 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b401e20bf4cb481db819cf0a841fc922, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733480324081 2024-12-06T10:18:46,358 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10121f7dcd1a4848bb92f33f373a0f44, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733480324060 2024-12-06T10:18:46,363 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:46,371 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#B#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:46,372 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/588798a1ab07434c83ee80fb4e651739 is 50, key is test_row_0/B:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:46,374 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412066349f4907d73477cb5427159d9398f37_0a237205d558afc218e72c1705b7c48d store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:46,378 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412066349f4907d73477cb5427159d9398f37_0a237205d558afc218e72c1705b7c48d, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:46,378 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412066349f4907d73477cb5427159d9398f37_0a237205d558afc218e72c1705b7c48d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:46,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742344_1520 (size=13255) 2024-12-06T10:18:46,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742345_1521 (size=4469) 2024-12-06T10:18:46,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:46,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
2024-12-06T10:18:46,394 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:46,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:46,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206cae6c3ecb3024e29881527172c715ac3_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_0/A:col10/1733480324721/Put/seqid=0 2024-12-06T10:18:46,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742346_1522 (size=12454) 2024-12-06T10:18:46,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:46,407 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206cae6c3ecb3024e29881527172c715ac3_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206cae6c3ecb3024e29881527172c715ac3_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:46,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/d58b51853e9b47369422561c8f46ceb0, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:46,408 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/d58b51853e9b47369422561c8f46ceb0 is 175, key is test_row_0/A:col10/1733480324721/Put/seqid=0 2024-12-06T10:18:46,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742347_1523 (size=31255) 2024-12-06T10:18:46,784 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#A#compaction#437 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:46,784 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/f796daa1863d474a959df29010d262cb is 175, key is test_row_0/A:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:46,785 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/588798a1ab07434c83ee80fb4e651739 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/588798a1ab07434c83ee80fb4e651739 2024-12-06T10:18:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742348_1524 (size=32209) 2024-12-06T10:18:46,789 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/B of 0a237205d558afc218e72c1705b7c48d into 588798a1ab07434c83ee80fb4e651739(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
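(The "Completed compaction" record above gives the outcome for store B: 3 input files rewritten into 588798a1ab07434c83ee80fb4e651739 at 12.9 K, versus the 36.9 K of input noted earlier. A companion sketch, in the same illustrative spirit as the one above, tabulates these outcomes from a saved copy of this output; only the message format shown here is assumed.)

    # Illustrative sketch: collect per-store compaction outcomes from the
    # "Completed compaction of N (...) file(s) ... into <file>(size=...)" records.
    import re

    COMPLETED = re.compile(
        r"Completed compaction of (?P<n>\d+) \([a-z]+\) file\(s\) in "
        r"(?P<region>\w+)/(?P<store>\w+) of \w+ into (?P<out>\w+)"
        r"\(size=(?P<size>[\d.]+ [A-Z]+)\)"
    )

    def completed_compactions(text):
        """Yield (region, store, input_file_count, output_file, output_size) per record."""
        for m in COMPLETED.finditer(text):
            yield m["region"], m["store"], int(m["n"]), m["out"], m["size"]
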
2024-12-06T10:18:46,789 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:46,789 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/B, priority=13, startTime=1733480326356; duration=0sec 2024-12-06T10:18:46,789 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:46,790 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:B 2024-12-06T10:18:46,790 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:46,791 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:46,791 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 0a237205d558afc218e72c1705b7c48d/C is initiating minor compaction (all files) 2024-12-06T10:18:46,791 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a237205d558afc218e72c1705b7c48d/C in TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:46,792 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/80a06cf164bd4fb7aa55d088f45e1c79, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/29297e316b5b4938b02f33d0b49947ad, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/bb0654dae3074850afe508ec737342a2] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp, totalSize=36.9 K 2024-12-06T10:18:46,792 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/f796daa1863d474a959df29010d262cb as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/f796daa1863d474a959df29010d262cb 2024-12-06T10:18:46,792 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 80a06cf164bd4fb7aa55d088f45e1c79, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733480323277 2024-12-06T10:18:46,793 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 29297e316b5b4938b02f33d0b49947ad, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733480323414 2024-12-06T10:18:46,793 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting bb0654dae3074850afe508ec737342a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733480324081 2024-12-06T10:18:46,800 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/A of 0a237205d558afc218e72c1705b7c48d into f796daa1863d474a959df29010d262cb(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:46,800 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:46,800 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/A, priority=13, startTime=1733480326356; duration=0sec 2024-12-06T10:18:46,801 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:46,801 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:A 2024-12-06T10:18:46,801 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a237205d558afc218e72c1705b7c48d#C#compaction#440 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:46,802 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/751ef058754b47909136b09ac6e9c246 is 50, key is test_row_0/C:col10/1733480324081/Put/seqid=0 2024-12-06T10:18:46,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742349_1525 (size=13255) 2024-12-06T10:18:46,815 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=408, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/d58b51853e9b47369422561c8f46ceb0 2024-12-06T10:18:46,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d5cc8235d44c4f28b81d11ce2893b0dd is 50, key is test_row_0/B:col10/1733480324721/Put/seqid=0 2024-12-06T10:18:46,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742350_1526 (size=12301) 2024-12-06T10:18:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:46,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
as already flushing 2024-12-06T10:18:46,851 DEBUG [Thread-1815 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:61610 2024-12-06T10:18:46,851 DEBUG [Thread-1815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:46,866 DEBUG [Thread-1807 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:61610 2024-12-06T10:18:46,866 DEBUG [Thread-1807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:46,866 DEBUG [Thread-1809 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:61610 2024-12-06T10:18:46,866 DEBUG [Thread-1809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:47,033 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aa0608f1c1c840f8ba374febde507b28, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/42b096ea0afb4e8b8a7ccddc336f389e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6166a50349748668c947c2dc8331b00, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/fa91e4bc7549434dafee949bc33a35fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0ffb75d39726404fa8029068f3efcc72, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e00e3057332b479c8d33b5ee9d7d5771, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/ef08b294db7d4eda8438e88df7536d54, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/35f0f781187e4a1680c65c504afb8c03, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44] to archive 2024-12-06T10:18:47,034 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
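(The HFileArchiver records below each move a compacted store file from the region's data directory to the mirror-image path under archive/. The transform those paths exhibit can be written as a one-line rewrite; the sketch below is only that string transform, not HBase's HFileArchiver, and it assumes the layout matches the paths printed in this log.)

    # Illustrative only: mirror the data -> archive rewrite visible in the
    # "Archived from FileableStoreFile" records below. Not the HBase implementation.
    def archive_path(store_file_path: str) -> str:
        """Map .../<root>/data/default/... to .../<root>/archive/data/default/..."""
        marker = "/data/default/"
        head, sep, tail = store_file_path.partition(marker)
        if not sep:
            raise ValueError("unexpected store file layout: " + store_file_path)
        return head + "/archive" + marker + tail

    # Example taken from the first archived file below.
    src = ("hdfs://localhost:40601/user/jenkins/test-data/"
           "d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/"
           "0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f")
    assert archive_path(src).endswith(
        "/archive/data/default/TestAcidGuarantees/"
        "0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f")
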
2024-12-06T10:18:47,035 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/2619bbf0a5c94c97ba5cf22caccbb10f 2024-12-06T10:18:47,036 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/99abc431051e4d3cbfb7c8e44b061395 2024-12-06T10:18:47,037 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b1b0d6f832694aae8518919b72babed6 2024-12-06T10:18:47,038 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aa0608f1c1c840f8ba374febde507b28 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aa0608f1c1c840f8ba374febde507b28 2024-12-06T10:18:47,040 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/7174e8c57b8e4ec1a046fb66a6ae9ca8 2024-12-06T10:18:47,041 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/42b096ea0afb4e8b8a7ccddc336f389e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/42b096ea0afb4e8b8a7ccddc336f389e 2024-12-06T10:18:47,042 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/4cad0455c29a410aa7a4c724608055f7 2024-12-06T10:18:47,043 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/af998095b4434beeb9bfd8c386e5b100 2024-12-06T10:18:47,044 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6166a50349748668c947c2dc8331b00 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6166a50349748668c947c2dc8331b00 2024-12-06T10:18:47,045 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/30db58433fe7434aaa311b5dbb8e2f8b 2024-12-06T10:18:47,046 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/9ce6669ea95141aea87c775e38510cfd 2024-12-06T10:18:47,046 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0edf375441ff42828c53d5db6594c339 2024-12-06T10:18:47,047 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/fa91e4bc7549434dafee949bc33a35fa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/fa91e4bc7549434dafee949bc33a35fa 2024-12-06T10:18:47,048 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e86d9f85a826411799219b878d5bf9bb 2024-12-06T10:18:47,049 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/783219202240414ea068d6c1a8b5c3c4 2024-12-06T10:18:47,050 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0ffb75d39726404fa8029068f3efcc72 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/0ffb75d39726404fa8029068f3efcc72 2024-12-06T10:18:47,050 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/aee8450be2c04ba2a4eca27ba844fc96 2024-12-06T10:18:47,051 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/c2db1dd4daf645499eb818d5d8d2e6ee 2024-12-06T10:18:47,052 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e00e3057332b479c8d33b5ee9d7d5771 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e00e3057332b479c8d33b5ee9d7d5771 2024-12-06T10:18:47,053 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/a67e947d0b05464aa3e8fdfc60f632d9 2024-12-06T10:18:47,054 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/ef08b294db7d4eda8438e88df7536d54 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/ef08b294db7d4eda8438e88df7536d54 2024-12-06T10:18:47,055 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/1d817db982704c2c992ff5763db74ee8 2024-12-06T10:18:47,056 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/232d64516dcb407a838e66d666a08770 2024-12-06T10:18:47,056 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/131354ff43ee4e1e8ad0b48cb20dd2b9 2024-12-06T10:18:47,057 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/e45803a8a3bc48029a08256ebf94594c 2024-12-06T10:18:47,058 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/35f0f781187e4a1680c65c504afb8c03 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/35f0f781187e4a1680c65c504afb8c03 2024-12-06T10:18:47,059 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/b6b8bece5c3f440887e38cbfc5bd6645 2024-12-06T10:18:47,060 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/10121f7dcd1a4848bb92f33f373a0f44 2024-12-06T10:18:47,063 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/c5db907d1a3848aead612ec9bbcaf145, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/377a88d1ecb04e959e20ca1b471f7b68, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1367b06ce7d4a2da40643788a9131aa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d7ac5a2950354648bbf2f9b22aa7c25d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/7dabef92fc574c5b8188a085f9f6b74e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/ccafbf84414d4ea19aef69278fbfe7e7, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/dd24dd9043ea4892bf2afe99be1335d8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/539d0d0a92ad472297a8a01d6709057f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/62e2b8ed72d0480995e27bc5ac4c92f0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/21950afa029f4d42822d3c974b418ac6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d161d12d2d964ee39be16db7c4802375, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/20ef227323754aa4967559eafa504aea, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3bf35279fc7e4d05af0a7c343eb2d242, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/662fabc51125429d93e6b9ead1a439fe, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/a4945403e6a4453c8ea7b7637f1a1cee, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1c1ca14321342768f40166190c38dad, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/626a4f51a1d648fc8d4a0c04266f79e1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/cc2580b982ef47639486728bda1005eb, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1b0d63a6e6c046e5911cab0d69ea7db6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/535b67126e8b48628228c3a014b70754, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/df59ae7c752643c192593d6af6c0927f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/691c0a3685b541d1ab1a5829b8f1b70e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1c546b28cc6441eda20faa18e01392bf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/12ed812a38ab495eb3d758f69ca0a572, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/aaf22d8ba67b4722bab4ece37053cc17, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b4ec7d87654e4785a188761ef6e45c9c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3cd8225a40da4939807f2cc3ad8a06d5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b401e20bf4cb481db819cf0a841fc922] to archive 2024-12-06T10:18:47,064 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:18:47,065 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/c5db907d1a3848aead612ec9bbcaf145 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/c5db907d1a3848aead612ec9bbcaf145 2024-12-06T10:18:47,065 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/377a88d1ecb04e959e20ca1b471f7b68 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/377a88d1ecb04e959e20ca1b471f7b68 2024-12-06T10:18:47,066 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1367b06ce7d4a2da40643788a9131aa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1367b06ce7d4a2da40643788a9131aa 2024-12-06T10:18:47,067 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d7ac5a2950354648bbf2f9b22aa7c25d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d7ac5a2950354648bbf2f9b22aa7c25d 2024-12-06T10:18:47,067 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/7dabef92fc574c5b8188a085f9f6b74e to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/7dabef92fc574c5b8188a085f9f6b74e 2024-12-06T10:18:47,068 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/ccafbf84414d4ea19aef69278fbfe7e7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/ccafbf84414d4ea19aef69278fbfe7e7 2024-12-06T10:18:47,069 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/dd24dd9043ea4892bf2afe99be1335d8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/dd24dd9043ea4892bf2afe99be1335d8 2024-12-06T10:18:47,069 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/539d0d0a92ad472297a8a01d6709057f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/539d0d0a92ad472297a8a01d6709057f 2024-12-06T10:18:47,070 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/62e2b8ed72d0480995e27bc5ac4c92f0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/62e2b8ed72d0480995e27bc5ac4c92f0 2024-12-06T10:18:47,071 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/21950afa029f4d42822d3c974b418ac6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/21950afa029f4d42822d3c974b418ac6 2024-12-06T10:18:47,072 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d161d12d2d964ee39be16db7c4802375 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d161d12d2d964ee39be16db7c4802375 2024-12-06T10:18:47,073 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/20ef227323754aa4967559eafa504aea to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/20ef227323754aa4967559eafa504aea 2024-12-06T10:18:47,073 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3bf35279fc7e4d05af0a7c343eb2d242 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3bf35279fc7e4d05af0a7c343eb2d242 2024-12-06T10:18:47,074 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/662fabc51125429d93e6b9ead1a439fe to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/662fabc51125429d93e6b9ead1a439fe 2024-12-06T10:18:47,075 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/a4945403e6a4453c8ea7b7637f1a1cee to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/a4945403e6a4453c8ea7b7637f1a1cee 2024-12-06T10:18:47,076 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1c1ca14321342768f40166190c38dad to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b1c1ca14321342768f40166190c38dad 2024-12-06T10:18:47,077 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/626a4f51a1d648fc8d4a0c04266f79e1 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/626a4f51a1d648fc8d4a0c04266f79e1 2024-12-06T10:18:47,078 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/cc2580b982ef47639486728bda1005eb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/cc2580b982ef47639486728bda1005eb 2024-12-06T10:18:47,079 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1b0d63a6e6c046e5911cab0d69ea7db6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1b0d63a6e6c046e5911cab0d69ea7db6 2024-12-06T10:18:47,080 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/535b67126e8b48628228c3a014b70754 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/535b67126e8b48628228c3a014b70754 2024-12-06T10:18:47,081 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/df59ae7c752643c192593d6af6c0927f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/df59ae7c752643c192593d6af6c0927f 2024-12-06T10:18:47,081 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/691c0a3685b541d1ab1a5829b8f1b70e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/691c0a3685b541d1ab1a5829b8f1b70e 2024-12-06T10:18:47,082 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1c546b28cc6441eda20faa18e01392bf to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/1c546b28cc6441eda20faa18e01392bf 2024-12-06T10:18:47,083 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/12ed812a38ab495eb3d758f69ca0a572 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/12ed812a38ab495eb3d758f69ca0a572 2024-12-06T10:18:47,084 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/aaf22d8ba67b4722bab4ece37053cc17 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/aaf22d8ba67b4722bab4ece37053cc17 2024-12-06T10:18:47,085 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b4ec7d87654e4785a188761ef6e45c9c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b4ec7d87654e4785a188761ef6e45c9c 2024-12-06T10:18:47,085 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3cd8225a40da4939807f2cc3ad8a06d5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/3cd8225a40da4939807f2cc3ad8a06d5 2024-12-06T10:18:47,086 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b401e20bf4cb481db819cf0a841fc922 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/b401e20bf4cb481db819cf0a841fc922 2024-12-06T10:18:47,089 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/0d327fbac0484ead8772e8367223768f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/95d48a7174444bf8a5d6dd0aa4947f13, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/1c376718eccc46b4ab24410fcd5afd6f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/166e997521724a6880f16c14f5d09542, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/8f76fbc799bd4b7dafe92cc292c45d53, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/41f75f32711f4022a5ddc2a99844b57d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/44bdc2371c8d4644a0dc21bbb000d5e0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/794bdf2a94a448b494b33aecabab93d8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b65e0b4e05ae48a3b6d685e7b3bbeed2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/560d62365081467cbeb5c351af19e9ff, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e14c7f1826042f09a9b0cdb0a4ed180, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/500df7f35a574de0afd25ea6726c8f48, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/d5b10eeb4b944763a80a6d5f49f31064, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e96852b9e364f46a36c28c075dde785, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/35fab34a76d14c4b9c8b8c86e05a63b8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/cb3ccfb8929d4960b6da3358e6559f83, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fbe6f3d61f424f279cb0082bb58df4c6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/5dc675390e6e4036b185a4f1379a694a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b2f468b415f34478a9441680107ead08, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/9ce62a3633ba4bb1a9d6d30c53aa690c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e86f7e2132d4400190355b05631eeca2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e970362747124a67b0d76585f54df470, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/2020b9e60fdf4e868c77516dc25890ad, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fe076c5bea9041d89aed7e7519a587cc] to archive 2024-12-06T10:18:47,090 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:18:47,090 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237 2024-12-06T10:18:47,091 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/0d327fbac0484ead8772e8367223768f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/0d327fbac0484ead8772e8367223768f 2024-12-06T10:18:47,092 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/95d48a7174444bf8a5d6dd0aa4947f13 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/95d48a7174444bf8a5d6dd0aa4947f13 2024-12-06T10:18:47,093 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/1c376718eccc46b4ab24410fcd5afd6f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/1c376718eccc46b4ab24410fcd5afd6f 2024-12-06T10:18:47,093 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/166e997521724a6880f16c14f5d09542 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/166e997521724a6880f16c14f5d09542 2024-12-06T10:18:47,094 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/8f76fbc799bd4b7dafe92cc292c45d53 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/8f76fbc799bd4b7dafe92cc292c45d53 2024-12-06T10:18:47,095 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/41f75f32711f4022a5ddc2a99844b57d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/41f75f32711f4022a5ddc2a99844b57d 2024-12-06T10:18:47,096 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/44bdc2371c8d4644a0dc21bbb000d5e0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/44bdc2371c8d4644a0dc21bbb000d5e0 2024-12-06T10:18:47,096 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/794bdf2a94a448b494b33aecabab93d8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/794bdf2a94a448b494b33aecabab93d8 2024-12-06T10:18:47,097 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b65e0b4e05ae48a3b6d685e7b3bbeed2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b65e0b4e05ae48a3b6d685e7b3bbeed2 2024-12-06T10:18:47,098 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/560d62365081467cbeb5c351af19e9ff to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/560d62365081467cbeb5c351af19e9ff 2024-12-06T10:18:47,098 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e14c7f1826042f09a9b0cdb0a4ed180 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e14c7f1826042f09a9b0cdb0a4ed180 2024-12-06T10:18:47,099 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/500df7f35a574de0afd25ea6726c8f48 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/500df7f35a574de0afd25ea6726c8f48 2024-12-06T10:18:47,100 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/d5b10eeb4b944763a80a6d5f49f31064 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/d5b10eeb4b944763a80a6d5f49f31064 2024-12-06T10:18:47,100 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e96852b9e364f46a36c28c075dde785 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/6e96852b9e364f46a36c28c075dde785 2024-12-06T10:18:47,101 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/35fab34a76d14c4b9c8b8c86e05a63b8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/35fab34a76d14c4b9c8b8c86e05a63b8 2024-12-06T10:18:47,102 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/cb3ccfb8929d4960b6da3358e6559f83 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/cb3ccfb8929d4960b6da3358e6559f83 2024-12-06T10:18:47,102 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fbe6f3d61f424f279cb0082bb58df4c6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fbe6f3d61f424f279cb0082bb58df4c6 2024-12-06T10:18:47,103 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/5dc675390e6e4036b185a4f1379a694a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/5dc675390e6e4036b185a4f1379a694a 2024-12-06T10:18:47,104 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b2f468b415f34478a9441680107ead08 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/b2f468b415f34478a9441680107ead08 2024-12-06T10:18:47,104 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/9ce62a3633ba4bb1a9d6d30c53aa690c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/9ce62a3633ba4bb1a9d6d30c53aa690c 2024-12-06T10:18:47,105 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e86f7e2132d4400190355b05631eeca2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e86f7e2132d4400190355b05631eeca2 2024-12-06T10:18:47,106 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e970362747124a67b0d76585f54df470 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/e970362747124a67b0d76585f54df470 2024-12-06T10:18:47,107 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/2020b9e60fdf4e868c77516dc25890ad to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/2020b9e60fdf4e868c77516dc25890ad 2024-12-06T10:18:47,108 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fe076c5bea9041d89aed7e7519a587cc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/fe076c5bea9041d89aed7e7519a587cc 2024-12-06T10:18:47,209 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/751ef058754b47909136b09ac6e9c246 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/751ef058754b47909136b09ac6e9c246 2024-12-06T10:18:47,212 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a237205d558afc218e72c1705b7c48d/C of 0a237205d558afc218e72c1705b7c48d into 751ef058754b47909136b09ac6e9c246(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
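The discharger entries above move each compacted store file from the region's data directory to the matching path under archive/, keeping the namespace/table/region/family layout intact. Below is a minimal sketch of that path mapping, assuming only the root-dir layout visible in these entries and hadoop-common on the classpath; the class and helper names (ArchivePathSketch, toArchivePath) are illustrative and are not HBase's own HFileArchiver code.

    import org.apache.hadoop.fs.Path;

    // Sketch: derive the archive location for a compacted store file, assuming
    //   <root>/data/<ns>/<table>/<region>/<family>/<file>
    //     -> <root>/archive/data/<ns>/<table>/<region>/<family>/<file>
    // as shown in the discharger log lines above. Not HBase's HFileArchiver.
    public final class ArchivePathSketch {

      static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toString();
        String file = storeFile.toString();
        if (!file.startsWith(root + "/data/")) {
          throw new IllegalArgumentException("not under " + root + "/data/: " + file);
        }
        // Keep the ns/table/region/family/file suffix and re-anchor it under archive/data.
        String suffix = file.substring((root + "/data/").length());
        return new Path(root, "archive/data/" + suffix);
      }

      public static void main(String[] args) {
        Path root = new Path(
            "hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/84eb67c638f048e4a929ba5250bbf237");
        // Prints the same archive target the discharger logged for this file.
        System.out.println(toArchivePath(root, storeFile));
      }
    }

Run against the first C-family file listed above, the sketch prints the same archive path that appears in the corresponding "Archived from FileableStoreFile" entry.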
2024-12-06T10:18:47,212 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:47,212 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d., storeName=0a237205d558afc218e72c1705b7c48d/C, priority=13, startTime=1733480326356; duration=0sec 2024-12-06T10:18:47,212 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:47,212 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a237205d558afc218e72c1705b7c48d:C 2024-12-06T10:18:47,223 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d5cc8235d44c4f28b81d11ce2893b0dd 2024-12-06T10:18:47,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/f737fce5929d4c979adc7724928ed1a7 is 50, key is test_row_0/C:col10/1733480324721/Put/seqid=0 2024-12-06T10:18:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742351_1527 (size=12301) 2024-12-06T10:18:47,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:47,542 DEBUG [Thread-1811 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:61610 2024-12-06T10:18:47,542 DEBUG [Thread-1811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:47,571 DEBUG [Thread-1813 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:61610 2024-12-06T10:18:47,572 DEBUG [Thread-1813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:47,632 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/f737fce5929d4c979adc7724928ed1a7 2024-12-06T10:18:47,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/d58b51853e9b47369422561c8f46ceb0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/d58b51853e9b47369422561c8f46ceb0 2024-12-06T10:18:47,639 INFO 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/d58b51853e9b47369422561c8f46ceb0, entries=150, sequenceid=408, filesize=30.5 K 2024-12-06T10:18:47,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/d5cc8235d44c4f28b81d11ce2893b0dd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d5cc8235d44c4f28b81d11ce2893b0dd 2024-12-06T10:18:47,641 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d5cc8235d44c4f28b81d11ce2893b0dd, entries=150, sequenceid=408, filesize=12.0 K 2024-12-06T10:18:47,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/f737fce5929d4c979adc7724928ed1a7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/f737fce5929d4c979adc7724928ed1a7 2024-12-06T10:18:47,644 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/f737fce5929d4c979adc7724928ed1a7, entries=150, sequenceid=408, filesize=12.0 K 2024-12-06T10:18:47,645 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=33.54 KB/34350 for 0a237205d558afc218e72c1705b7c48d in 1251ms, sequenceid=408, compaction requested=false 2024-12-06T10:18:47,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:47,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 
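The flush that finishes here is the server side of a client flush request; the same client then disables and drops the table further down in this log (procedures 120, 122 and 126). The following is a minimal client-side sketch of that flush, disable, delete sequence against the public Admin API, assuming an hbase-site.xml for the test cluster is on the classpath; it is illustrative and is not the AcidGuaranteesTestTool's own teardown code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch of the client calls whose server-side procedures appear in this log.
    public final class FlushDisableDeleteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);          // master runs a flush procedure (cf. procId 120/121 here)
          admin.disableTable(table);   // DisableTableProcedure closes and unassigns the region
          admin.deleteTable(table);    // DeleteTableProcedure archives the region and MOB files
        }
      }
    }

Each call blocks until the master reports the corresponding procedure complete, which matches the "Operation: FLUSH ... completed" and "Operation: DISABLE ... completed" client lines later in this section.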
2024-12-06T10:18:47,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-06T10:18:47,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-06T10:18:47,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-06T10:18:47,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4730 sec 2024-12-06T10:18:47,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.4760 sec 2024-12-06T10:18:49,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T10:18:49,277 INFO [Thread-1817 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2024 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6072 rows 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2012 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6036 rows 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2020 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6060 rows 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2022 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6066 rows 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2012 2024-12-06T10:18:49,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6036 rows 2024-12-06T10:18:49,277 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:18:49,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:61610 2024-12-06T10:18:49,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:18:49,280 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-12-06T10:18:49,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T10:18:49,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:49,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-06T10:18:49,285 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480329284"}]},"ts":"1733480329284"} 2024-12-06T10:18:49,286 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T10:18:49,288 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T10:18:49,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:18:49,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, UNASSIGN}] 2024-12-06T10:18:49,290 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, UNASSIGN 2024-12-06T10:18:49,290 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:49,291 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:18:49,291 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:18:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-06T10:18:49,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:49,443 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 0a237205d558afc218e72c1705b7c48d, disabling compactions & flushes 2024-12-06T10:18:49,443 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. after waiting 0 ms 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:49,443 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 0a237205d558afc218e72c1705b7c48d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=A 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:49,443 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=B 2024-12-06T10:18:49,444 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:49,444 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a237205d558afc218e72c1705b7c48d, store=C 2024-12-06T10:18:49,444 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:49,448 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067d5236ca5f8f4709a23ff2878de6c266_0a237205d558afc218e72c1705b7c48d is 50, key is test_row_1/A:col10/1733480327570/Put/seqid=0 2024-12-06T10:18:49,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742352_1528 (size=9914) 2024-12-06T10:18:49,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-06T10:18:49,852 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:49,855 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067d5236ca5f8f4709a23ff2878de6c266_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067d5236ca5f8f4709a23ff2878de6c266_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:49,855 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/34e40f74b29849baa91959a9f55e300b, store: [table=TestAcidGuarantees family=A region=0a237205d558afc218e72c1705b7c48d] 2024-12-06T10:18:49,856 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/34e40f74b29849baa91959a9f55e300b is 175, key is test_row_1/A:col10/1733480327570/Put/seqid=0 2024-12-06T10:18:49,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742353_1529 (size=22561) 2024-12-06T10:18:49,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-06T10:18:50,260 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=419, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/34e40f74b29849baa91959a9f55e300b 2024-12-06T10:18:50,265 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/2d30e4aee29d43ab8bc0498c18dd541b is 50, key is test_row_1/B:col10/1733480327570/Put/seqid=0 2024-12-06T10:18:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742354_1530 (size=9857) 2024-12-06T10:18:50,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-06T10:18:50,668 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/2d30e4aee29d43ab8bc0498c18dd541b 2024-12-06T10:18:50,674 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/589b9619def14d50ab62aaef253585c7 is 50, key is test_row_1/C:col10/1733480327570/Put/seqid=0 2024-12-06T10:18:50,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742355_1531 (size=9857) 2024-12-06T10:18:51,077 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/589b9619def14d50ab62aaef253585c7 2024-12-06T10:18:51,081 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/A/34e40f74b29849baa91959a9f55e300b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/34e40f74b29849baa91959a9f55e300b 2024-12-06T10:18:51,083 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/34e40f74b29849baa91959a9f55e300b, entries=100, sequenceid=419, filesize=22.0 K 2024-12-06T10:18:51,084 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/B/2d30e4aee29d43ab8bc0498c18dd541b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/2d30e4aee29d43ab8bc0498c18dd541b 2024-12-06T10:18:51,086 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/2d30e4aee29d43ab8bc0498c18dd541b, entries=100, sequenceid=419, filesize=9.6 K 2024-12-06T10:18:51,087 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/.tmp/C/589b9619def14d50ab62aaef253585c7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/589b9619def14d50ab62aaef253585c7 2024-12-06T10:18:51,089 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/589b9619def14d50ab62aaef253585c7, entries=100, sequenceid=419, filesize=9.6 K 2024-12-06T10:18:51,090 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 0a237205d558afc218e72c1705b7c48d in 1647ms, sequenceid=419, compaction requested=true 2024-12-06T10:18:51,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/80a06cf164bd4fb7aa55d088f45e1c79, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/29297e316b5b4938b02f33d0b49947ad, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/bb0654dae3074850afe508ec737342a2] to archive 2024-12-06T10:18:51,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:18:51,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/80a06cf164bd4fb7aa55d088f45e1c79 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/80a06cf164bd4fb7aa55d088f45e1c79 2024-12-06T10:18:51,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/29297e316b5b4938b02f33d0b49947ad to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/29297e316b5b4938b02f33d0b49947ad 2024-12-06T10:18:51,094 DEBUG [StoreCloser-TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/bb0654dae3074850afe508ec737342a2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/bb0654dae3074850afe508ec737342a2 2024-12-06T10:18:51,098 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/recovered.edits/422.seqid, newMaxSeqId=422, maxSeqId=4 2024-12-06T10:18:51,099 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 
{event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d. 2024-12-06T10:18:51,099 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 0a237205d558afc218e72c1705b7c48d: 2024-12-06T10:18:51,100 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,101 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=0a237205d558afc218e72c1705b7c48d, regionState=CLOSED 2024-12-06T10:18:51,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-06T10:18:51,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 0a237205d558afc218e72c1705b7c48d, server=552d6a33fa09,33397,1733480204743 in 1.8100 sec 2024-12-06T10:18:51,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-12-06T10:18:51,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a237205d558afc218e72c1705b7c48d, UNASSIGN in 1.8130 sec 2024-12-06T10:18:51,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-06T10:18:51,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8160 sec 2024-12-06T10:18:51,105 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480331105"}]},"ts":"1733480331105"} 2024-12-06T10:18:51,106 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T10:18:51,108 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T10:18:51,109 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8270 sec 2024-12-06T10:18:51,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-06T10:18:51,387 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-06T10:18:51,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T10:18:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,389 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=126 2024-12-06T10:18:51,390 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,391 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,393 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/recovered.edits] 2024-12-06T10:18:51,395 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/34e40f74b29849baa91959a9f55e300b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/34e40f74b29849baa91959a9f55e300b 2024-12-06T10:18:51,396 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/d58b51853e9b47369422561c8f46ceb0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/d58b51853e9b47369422561c8f46ceb0 2024-12-06T10:18:51,397 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/f796daa1863d474a959df29010d262cb to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/A/f796daa1863d474a959df29010d262cb 2024-12-06T10:18:51,399 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/2d30e4aee29d43ab8bc0498c18dd541b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/2d30e4aee29d43ab8bc0498c18dd541b 2024-12-06T10:18:51,399 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/588798a1ab07434c83ee80fb4e651739 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/588798a1ab07434c83ee80fb4e651739 2024-12-06T10:18:51,400 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d5cc8235d44c4f28b81d11ce2893b0dd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/B/d5cc8235d44c4f28b81d11ce2893b0dd 2024-12-06T10:18:51,402 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/589b9619def14d50ab62aaef253585c7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/589b9619def14d50ab62aaef253585c7 2024-12-06T10:18:51,403 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/751ef058754b47909136b09ac6e9c246 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/751ef058754b47909136b09ac6e9c246 2024-12-06T10:18:51,403 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/f737fce5929d4c979adc7724928ed1a7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/C/f737fce5929d4c979adc7724928ed1a7 2024-12-06T10:18:51,405 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/recovered.edits/422.seqid to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d/recovered.edits/422.seqid 2024-12-06T10:18:51,406 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,406 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T10:18:51,406 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T10:18:51,407 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-06T10:18:51,409 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060aa0c4d97df04194aa0f08788622060c_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060aa0c4d97df04194aa0f08788622060c_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,410 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060f943210e2fc4ee19831f7a75def70ee_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060f943210e2fc4ee19831f7a75def70ee_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,410 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061205270b40b24363b47d5715cb4cedd7_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061205270b40b24363b47d5715cb4cedd7_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,411 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062738e021ee804f11bc33f785ad2fc104_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062738e021ee804f11bc33f785ad2fc104_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,412 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206312d879309c24854910a569627b1e000_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206312d879309c24854910a569627b1e000_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,412 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206315212ffe14e403e8657d4255d5197fe_0a237205d558afc218e72c1705b7c48d to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206315212ffe14e403e8657d4255d5197fe_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,414 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065c01c7407b41405e95104bfde5c4964f_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065c01c7407b41405e95104bfde5c4964f_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,415 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206601512bcd1bd43a0936b25373abd099a_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206601512bcd1bd43a0936b25373abd099a_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,416 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412066798a1f5078a4e7daffdc0633e310a2d_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412066798a1f5078a4e7daffdc0633e310a2d_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,417 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120669b1d403ebbc4b9a92cdaa02f75ce652_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120669b1d403ebbc4b9a92cdaa02f75ce652_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,417 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206718264efe8d04d15b40e507635b8c656_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206718264efe8d04d15b40e507635b8c656_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,418 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067d5236ca5f8f4709a23ff2878de6c266_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067d5236ca5f8f4709a23ff2878de6c266_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,419 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206856218747b0441d5911a8586c0dec0aa_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206856218747b0441d5911a8586c0dec0aa_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,420 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206982c69c1f5a445abbe89bf883e4c1fbb_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206982c69c1f5a445abbe89bf883e4c1fbb_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,420 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ada6979867264173a01ef1e500dc658e_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ada6979867264173a01ef1e500dc658e_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,421 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ae3d0b12e18f4fc2a03284df4f078090_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ae3d0b12e18f4fc2a03284df4f078090_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,422 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206cae6c3ecb3024e29881527172c715ac3_0a237205d558afc218e72c1705b7c48d to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206cae6c3ecb3024e29881527172c715ac3_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,423 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d0596a9203ca42e8b6e8657f37316e69_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d0596a9203ca42e8b6e8657f37316e69_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,423 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d30f46cc468a4d6a87d35d5aad33e651_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d30f46cc468a4d6a87d35d5aad33e651_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,424 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d4fba879792b47c49fd691a0dd560dc1_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d4fba879792b47c49fd691a0dd560dc1_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,425 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ee774b3c4daa4227a91eabefa8397873_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ee774b3c4daa4227a91eabefa8397873_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,426 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206fc709d11a4864c3182c901bdc960b045_0a237205d558afc218e72c1705b7c48d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206fc709d11a4864c3182c901bdc960b045_0a237205d558afc218e72c1705b7c48d 2024-12-06T10:18:51,426 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T10:18:51,428 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,429 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T10:18:51,430 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T10:18:51,431 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,431 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T10:18:51,431 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733480331431"}]},"ts":"9223372036854775807"} 2024-12-06T10:18:51,433 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T10:18:51,433 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0a237205d558afc218e72c1705b7c48d, NAME => 'TestAcidGuarantees,,1733480303291.0a237205d558afc218e72c1705b7c48d.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T10:18:51,433 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-06T10:18:51,433 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733480331433"}]},"ts":"9223372036854775807"} 2024-12-06T10:18:51,434 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T10:18:51,436 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,436 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 48 msec 2024-12-06T10:18:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-06T10:18:51,491 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-06T10:18:51,500 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 237) - Thread LEAK? -, OpenFileDescriptor=456 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=408 (was 401) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6446 (was 7227) 2024-12-06T10:18:51,511 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=408, ProcessCount=11, AvailableMemoryMB=6444 2024-12-06T10:18:51,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T10:18:51,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:18:51,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T10:18:51,514 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:18:51,514 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:51,514 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-12-06T10:18:51,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-06T10:18:51,515 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:18:51,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742356_1532 (size=963) 2024-12-06T10:18:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-06T10:18:51,817 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-06T10:18:51,922 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:18:51,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742357_1533 (size=53) 2024-12-06T10:18:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-06T10:18:52,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:18:52,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing fa004d65282160f629f3eb2a5c9dca1d, disabling compactions & flushes 2024-12-06T10:18:52,327 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:52,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:52,328 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. after waiting 0 ms 2024-12-06T10:18:52,328 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:52,328 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
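For orientation, the disable/delete/create cycle recorded above (DisableTableProcedure pid=122, DeleteTableProcedure pid=126, CreateTableProcedure pid=127 with the 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute and families A, B, C at one version) corresponds to the standard HBase 2.x Admin client calls. The sketch below is illustrative only, not the test's own code; the class name and configuration object are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RecreateTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // placeholder client configuration
    TableName tn = TableName.valueOf("TestAcidGuarantees");

    // Table-level attribute and families A/B/C with VERSIONS => '1', as in the logged descriptor.
    TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String cf : new String[] {"A", "B", "C"}) {
      tdb.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf)).setMaxVersions(1).build());
    }

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        admin.disableTable(tn);        // drives a DisableTableProcedure (pid=122 above)
        admin.deleteTable(tn);         // drives a DeleteTableProcedure (pid=126 above)
      }
      admin.createTable(tdb.build());  // drives a CreateTableProcedure (pid=127 above)
    }
  }
}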
2024-12-06T10:18:52,328 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:52,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:18:52,329 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733480332328"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480332328"}]},"ts":"1733480332328"} 2024-12-06T10:18:52,329 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:18:52,330 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:18:52,330 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480332330"}]},"ts":"1733480332330"} 2024-12-06T10:18:52,331 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T10:18:52,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, ASSIGN}] 2024-12-06T10:18:52,335 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, ASSIGN 2024-12-06T10:18:52,336 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:18:52,486 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=fa004d65282160f629f3eb2a5c9dca1d, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:52,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:18:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-06T10:18:52,639 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:52,641 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:18:52,641 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:18:52,641 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,641 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:18:52,641 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,641 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,642 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,643 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:52,644 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa004d65282160f629f3eb2a5c9dca1d columnFamilyName A 2024-12-06T10:18:52,644 DEBUG [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:52,644 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.HStore(327): Store=fa004d65282160f629f3eb2a5c9dca1d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:52,644 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,645 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:52,645 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa004d65282160f629f3eb2a5c9dca1d columnFamilyName B 2024-12-06T10:18:52,645 DEBUG [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:52,646 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.HStore(327): Store=fa004d65282160f629f3eb2a5c9dca1d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:52,646 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,647 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:18:52,647 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa004d65282160f629f3eb2a5c9dca1d columnFamilyName C 2024-12-06T10:18:52,647 DEBUG [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:18:52,647 INFO [StoreOpener-fa004d65282160f629f3eb2a5c9dca1d-1 {}] regionserver.HStore(327): Store=fa004d65282160f629f3eb2a5c9dca1d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:18:52,647 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:52,648 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,648 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,649 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:18:52,650 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:52,651 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:18:52,651 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened fa004d65282160f629f3eb2a5c9dca1d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60412930, jitterRate=-0.09977719187736511}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:18:52,652 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:52,653 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., pid=129, masterSystemTime=1733480332638 2024-12-06T10:18:52,654 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:52,654 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
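The store-open records above show families A, B and C each backed by a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, which follows from the table-level 'hbase.hregion.compacting.memstore.type' attribute in the descriptor. As an illustrative fragment (not taken from the test), the same policy can also be requested per column family through the public builder API:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Per-family equivalent of the ADAPTIVE table attribute; this could replace the plain
// setMaxVersions(1) families in the sketch shown earlier.
ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
    .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
    .setMaxVersions(1)
    .build();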
2024-12-06T10:18:52,654 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=fa004d65282160f629f3eb2a5c9dca1d, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:52,656 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-06T10:18:52,656 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 in 168 msec 2024-12-06T10:18:52,657 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-12-06T10:18:52,657 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, ASSIGN in 322 msec 2024-12-06T10:18:52,657 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:18:52,658 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480332657"}]},"ts":"1733480332657"} 2024-12-06T10:18:52,658 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T10:18:52,660 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:18:52,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-12-06T10:18:53,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-06T10:18:53,619 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-12-06T10:18:53,621 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-12-06T10:18:53,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,626 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,627 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,628 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:18:53,629 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43030, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:18:53,630 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-12-06T10:18:53,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,634 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-12-06T10:18:53,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,638 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-12-06T10:18:53,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-12-06T10:18:53,649 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,649 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-12-06T10:18:53,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,655 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-12-06T10:18:53,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,661 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-12-06T10:18:53,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,667 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-12-06T10:18:53,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,672 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-12-06T10:18:53,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-12-06T10:18:53,681 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:18:53,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-06T10:18:53,687 DEBUG [hconnection-0x4e91cdd4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,688 DEBUG [hconnection-0x5a9d300d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,688 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:53,688 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,688 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:53,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:53,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:53,689 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,690 DEBUG [hconnection-0xd567956-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,691 DEBUG [hconnection-0x688be6ce-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,691 DEBUG [hconnection-0x59aea262-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,691 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,692 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51460, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,692 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:53,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:18:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:18:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:18:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:18:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:53,700 DEBUG 
[hconnection-0x4a3a7b34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,700 DEBUG [hconnection-0x16f7dd80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,701 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,701 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,702 DEBUG [hconnection-0x50630653-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,703 DEBUG [hconnection-0x64ddb52c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,703 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,704 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,705 DEBUG [hconnection-0x7fe1639a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:18:53,705 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:18:53,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480393707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480393707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480393708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480393708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480393716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/213be9078a724502b0543613c355e47e is 50, key is test_row_0/A:col10/1733480333693/Put/seqid=0 2024-12-06T10:18:53,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742358_1534 (size=12001) 2024-12-06T10:18:53,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/213be9078a724502b0543613c355e47e 2024-12-06T10:18:53,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/080a8dcf21fc4f1b9bee7f866a3b6e01 is 50, key is test_row_0/B:col10/1733480333693/Put/seqid=0 2024-12-06T10:18:53,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742359_1535 (size=12001) 2024-12-06T10:18:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:53,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480393809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480393809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480393810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480393813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:53,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480393820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:53,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:53,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:53,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:53,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:53,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:53,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:53,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:53,993 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:53,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:53,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:53,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:53,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:53,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:53,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:53,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480394015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480394016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480394017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480394020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480394023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,146 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:54,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:54,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:54,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/080a8dcf21fc4f1b9bee7f866a3b6e01 2024-12-06T10:18:54,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/72cd60b0612047c0b4a9999b6c8c5dde is 50, key is test_row_0/C:col10/1733480333693/Put/seqid=0 2024-12-06T10:18:54,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742360_1536 (size=12001) 2024-12-06T10:18:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:54,298 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:54,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:54,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480394320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480394320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480394321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480394324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480394328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,451 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:54,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:54,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
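The entries above show the flush remote procedure (pid=131) failing with "Unable to complete flush" because the region reports it is already flushing, after which the master records the failure and the callable is dispatched again. As an illustration only (not HBase source), the sketch below shows the general retry-with-backoff shape of such a loop; the FlushAttempt interface and tryFlush() helper are hypothetical stand-ins for "ask the region to flush".

// Illustrative only: generic retry-with-backoff for an operation that can be
// temporarily blocked, similar in spirit to the re-dispatched flush above.
public final class FlushRetrySketch {
  interface FlushAttempt {
    // Hypothetical helper: returns true once the flush actually ran.
    boolean tryFlush() throws java.io.IOException;
  }

  static void flushWithBackoff(FlushAttempt attempt, int maxRetries) throws Exception {
    long backoffMs = 100;                          // start with a short pause
    for (int i = 0; i < maxRetries; i++) {
      if (attempt.tryFlush()) {
        return;                                    // flush completed
      }
      Thread.sleep(backoffMs);                     // region still flushing; wait and retry
      backoffMs = Math.min(backoffMs * 2, 5_000);  // exponential backoff, capped
    }
    throw new java.io.IOException("Unable to complete flush after " + maxRetries + " attempts");
  }
}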
2024-12-06T10:18:54,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/72cd60b0612047c0b4a9999b6c8c5dde 2024-12-06T10:18:54,603 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:54,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:54,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:54,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:54,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
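The RegionTooBusyException entries above indicate that writes to fa004d65282160f629f3eb2a5c9dca1d are rejected while the region's memstore is over its 512.0 K blocking limit and the flush has not yet drained it. A minimal client-side sketch of tolerating that condition with the public HBase client API follows; depending on client retry settings the exception may arrive wrapped rather than directly, so treat this as an assumption-laden illustration. The row, family, and qualifier simply mirror the values visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 200;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);     // rejected while the memstore is over the blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);                   // back off and let the flush drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}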
2024-12-06T10:18:54,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/213be9078a724502b0543613c355e47e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/213be9078a724502b0543613c355e47e 2024-12-06T10:18:54,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/213be9078a724502b0543613c355e47e, entries=150, sequenceid=14, filesize=11.7 K 2024-12-06T10:18:54,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/080a8dcf21fc4f1b9bee7f866a3b6e01 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01 2024-12-06T10:18:54,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01, entries=150, sequenceid=14, filesize=11.7 K 2024-12-06T10:18:54,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/72cd60b0612047c0b4a9999b6c8c5dde as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/72cd60b0612047c0b4a9999b6c8c5dde 2024-12-06T10:18:54,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/72cd60b0612047c0b4a9999b6c8c5dde, entries=150, sequenceid=14, filesize=11.7 K 2024-12-06T10:18:54,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for fa004d65282160f629f3eb2a5c9dca1d in 926ms, sequenceid=14, compaction requested=false 2024-12-06T10:18:54,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:54,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T10:18:54,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
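The MemStoreFlusher entries above write each flushed store file under the region's .tmp directory and then commit it into the A, B, and C store directories before reporting "Finished flush". The sketch below illustrates that write-to-temp-then-rename commit pattern with the generic Hadoop FileSystem API; the paths and file contents are placeholders, not the actual test artifacts.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // fs.defaultFS would point at the target filesystem
    FileSystem fs = FileSystem.get(conf);

    // Placeholder paths standing in for <region>/.tmp/<family>/<file> and <region>/<family>/<file>.
    Path tmp = new Path("/data/default/ExampleTable/region/.tmp/A/flushfile");
    Path dst = new Path("/data/default/ExampleTable/region/A/flushfile");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here");   // stand-in for the real file contents
    }

    // Commit: move the finished file from .tmp into the store directory in a single rename,
    // so readers never observe a partially written file.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed for " + tmp);
    }
  }
}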
2024-12-06T10:18:54,757 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:18:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:18:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:18:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:18:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/9e4917827bd84af7bcc3bfe3ef1af491 is 50, key is test_row_0/A:col10/1733480333702/Put/seqid=0 2024-12-06T10:18:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742361_1537 (size=12001) 2024-12-06T10:18:54,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:54,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:54,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:54,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480394834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480394835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480394835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480394836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480394837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480394941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480394941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480394941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480394941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:54,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:54,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480394942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480395145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480395146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480395146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480395146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480395146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,177 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/9e4917827bd84af7bcc3bfe3ef1af491 2024-12-06T10:18:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/851a20bbe2d044b582a0618bd4e1937d is 50, key is test_row_0/B:col10/1733480333702/Put/seqid=0 2024-12-06T10:18:55,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742362_1538 (size=12001) 2024-12-06T10:18:55,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480395450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480395451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480395452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480395452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480395453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,592 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/851a20bbe2d044b582a0618bd4e1937d 2024-12-06T10:18:55,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/687c41a39fc04694acd44ef97d79e0d9 is 50, key is test_row_0/C:col10/1733480333702/Put/seqid=0 2024-12-06T10:18:55,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742363_1539 (size=12001) 2024-12-06T10:18:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:55,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480395955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480395956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480395957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480395957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480395957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:56,022 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/687c41a39fc04694acd44ef97d79e0d9 2024-12-06T10:18:56,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/9e4917827bd84af7bcc3bfe3ef1af491 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9e4917827bd84af7bcc3bfe3ef1af491 2024-12-06T10:18:56,028 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9e4917827bd84af7bcc3bfe3ef1af491, entries=150, sequenceid=39, filesize=11.7 K 2024-12-06T10:18:56,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/851a20bbe2d044b582a0618bd4e1937d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/851a20bbe2d044b582a0618bd4e1937d 2024-12-06T10:18:56,032 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/851a20bbe2d044b582a0618bd4e1937d, entries=150, sequenceid=39, filesize=11.7 K 2024-12-06T10:18:56,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/687c41a39fc04694acd44ef97d79e0d9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/687c41a39fc04694acd44ef97d79e0d9 2024-12-06T10:18:56,039 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/687c41a39fc04694acd44ef97d79e0d9, entries=150, sequenceid=39, filesize=11.7 K 2024-12-06T10:18:56,039 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for fa004d65282160f629f3eb2a5c9dca1d in 1282ms, sequenceid=39, compaction requested=false 2024-12-06T10:18:56,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:56,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:56,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-06T10:18:56,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-06T10:18:56,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-06T10:18:56,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3530 sec 2024-12-06T10:18:56,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.3560 sec 2024-12-06T10:18:56,620 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T10:18:56,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:56,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:18:56,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:18:56,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:56,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:18:56,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:56,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:18:56,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:56,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/694462eab5524b97b36c8014089bcb29 is 50, key is test_row_0/A:col10/1733480336965/Put/seqid=0 2024-12-06T10:18:56,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742364_1540 (size=14341) 2024-12-06T10:18:57,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480397001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480397003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480397004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480397004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480397006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480397109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480397111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480397111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480397111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480397113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480397312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480397315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480397315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480397316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480397320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/694462eab5524b97b36c8014089bcb29 2024-12-06T10:18:57,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/651f85b097b94b7c99329e5bbaddbb17 is 50, key is test_row_0/B:col10/1733480336965/Put/seqid=0 2024-12-06T10:18:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742365_1541 (size=12001) 2024-12-06T10:18:57,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/651f85b097b94b7c99329e5bbaddbb17 2024-12-06T10:18:57,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3821dc29e34844c7a664312b9be0455a is 50, key is test_row_0/C:col10/1733480336965/Put/seqid=0 2024-12-06T10:18:57,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742366_1542 (size=12001) 2024-12-06T10:18:57,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3821dc29e34844c7a664312b9be0455a 2024-12-06T10:18:57,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/694462eab5524b97b36c8014089bcb29 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/694462eab5524b97b36c8014089bcb29 2024-12-06T10:18:57,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/694462eab5524b97b36c8014089bcb29, entries=200, sequenceid=52, filesize=14.0 K 2024-12-06T10:18:57,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/651f85b097b94b7c99329e5bbaddbb17 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/651f85b097b94b7c99329e5bbaddbb17 2024-12-06T10:18:57,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/651f85b097b94b7c99329e5bbaddbb17, entries=150, sequenceid=52, filesize=11.7 K 2024-12-06T10:18:57,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3821dc29e34844c7a664312b9be0455a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3821dc29e34844c7a664312b9be0455a 2024-12-06T10:18:57,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3821dc29e34844c7a664312b9be0455a, entries=150, sequenceid=52, filesize=11.7 K 2024-12-06T10:18:57,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fa004d65282160f629f3eb2a5c9dca1d in 487ms, sequenceid=52, compaction requested=true 2024-12-06T10:18:57,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:57,454 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:18:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:57,454 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store 
size is 2 2024-12-06T10:18:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:18:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:57,455 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:57,455 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:57,455 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files) 2024-12-06T10:18:57,455 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor compaction (all files) 2024-12-06T10:18:57,455 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:57,455 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/213be9078a724502b0543613c355e47e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9e4917827bd84af7bcc3bfe3ef1af491, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/694462eab5524b97b36c8014089bcb29] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=37.4 K 2024-12-06T10:18:57,455 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:18:57,455 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/851a20bbe2d044b582a0618bd4e1937d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/651f85b097b94b7c99329e5bbaddbb17] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=35.2 K 2024-12-06T10:18:57,456 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 213be9078a724502b0543613c355e47e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733480333691 2024-12-06T10:18:57,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 080a8dcf21fc4f1b9bee7f866a3b6e01, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733480333691 2024-12-06T10:18:57,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 851a20bbe2d044b582a0618bd4e1937d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733480333702 2024-12-06T10:18:57,456 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e4917827bd84af7bcc3bfe3ef1af491, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733480333702 2024-12-06T10:18:57,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 651f85b097b94b7c99329e5bbaddbb17, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480334834 2024-12-06T10:18:57,456 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 694462eab5524b97b36c8014089bcb29, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480334827 2024-12-06T10:18:57,465 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#455 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:57,465 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/dbb6d000bed740afb91e9e97dd22c13e is 50, key is test_row_0/B:col10/1733480336965/Put/seqid=0 2024-12-06T10:18:57,468 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#456 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:57,468 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/1b66dbc97bb641bb9413abf67d2ed65d is 50, key is test_row_0/A:col10/1733480336965/Put/seqid=0 2024-12-06T10:18:57,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742368_1544 (size=12104) 2024-12-06T10:18:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742367_1543 (size=12104) 2024-12-06T10:18:57,486 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/dbb6d000bed740afb91e9e97dd22c13e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/dbb6d000bed740afb91e9e97dd22c13e 2024-12-06T10:18:57,494 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into dbb6d000bed740afb91e9e97dd22c13e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:57,495 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:57,495 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=13, startTime=1733480337454; duration=0sec 2024-12-06T10:18:57,495 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:18:57,495 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:18:57,495 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:18:57,496 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:18:57,496 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:18:57,496 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:18:57,496 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/72cd60b0612047c0b4a9999b6c8c5dde, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/687c41a39fc04694acd44ef97d79e0d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3821dc29e34844c7a664312b9be0455a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=35.2 K 2024-12-06T10:18:57,497 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 72cd60b0612047c0b4a9999b6c8c5dde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733480333691 2024-12-06T10:18:57,497 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 687c41a39fc04694acd44ef97d79e0d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733480333702 2024-12-06T10:18:57,497 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3821dc29e34844c7a664312b9be0455a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480334834 2024-12-06T10:18:57,512 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#457 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:18:57,512 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/1e435e337b394cc995635d67f8a264c1 is 50, key is test_row_0/C:col10/1733480336965/Put/seqid=0 2024-12-06T10:18:57,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742369_1545 (size=12104) 2024-12-06T10:18:57,520 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/1e435e337b394cc995635d67f8a264c1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1e435e337b394cc995635d67f8a264c1 2024-12-06T10:18:57,524 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 1e435e337b394cc995635d67f8a264c1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:18:57,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:57,524 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=13, startTime=1733480337454; duration=0sec 2024-12-06T10:18:57,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:57,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:18:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:57,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:18:57,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:18:57,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:57,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:18:57,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:57,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:18:57,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:57,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/4a60b7efd80744a3b21e24a48bcfb2fc is 50, key is test_row_0/A:col10/1733480337003/Put/seqid=0 2024-12-06T10:18:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742370_1546 (size=14341) 2024-12-06T10:18:57,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480397658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480397658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480397665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480397666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480397667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480397769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480397769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480397771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480397775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480397775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T10:18:57,794 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-06T10:18:57,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-06T10:18:57,797 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T10:18:57,797 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:57,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:57,881 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/1b66dbc97bb641bb9413abf67d2ed65d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1b66dbc97bb641bb9413abf67d2ed65d 2024-12-06T10:18:57,886 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into 1b66dbc97bb641bb9413abf67d2ed65d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:18:57,886 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:57,886 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=13, startTime=1733480337454; duration=0sec 2024-12-06T10:18:57,886 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:18:57,886 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:18:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T10:18:57,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T10:18:57,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:57,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:57,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:57,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:57,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:57,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:57,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480397974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480397975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480397979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480397982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:57,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480397982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/4a60b7efd80744a3b21e24a48bcfb2fc 2024-12-06T10:18:58,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/72db4305aa1d4756a1d21ef1833dbd1e is 50, key is test_row_0/B:col10/1733480337003/Put/seqid=0 2024-12-06T10:18:58,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742371_1547 (size=12001) 2024-12-06T10:18:58,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/72db4305aa1d4756a1d21ef1833dbd1e 2024-12-06T10:18:58,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f730a319a48e4ea1a3a714e975328190 is 50, key is test_row_0/C:col10/1733480337003/Put/seqid=0 2024-12-06T10:18:58,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742372_1548 (size=12001) 2024-12-06T10:18:58,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T10:18:58,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T10:18:58,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:58,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:58,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:58,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T10:18:58,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:18:58,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:58,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:58,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:18:58,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480398276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480398278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480398283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480398287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480398287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T10:18:58,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T10:18:58,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:58,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:58,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:18:58,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:18:58,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f730a319a48e4ea1a3a714e975328190 2024-12-06T10:18:58,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/4a60b7efd80744a3b21e24a48bcfb2fc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/4a60b7efd80744a3b21e24a48bcfb2fc 2024-12-06T10:18:58,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/4a60b7efd80744a3b21e24a48bcfb2fc, entries=200, sequenceid=78, filesize=14.0 K 2024-12-06T10:18:58,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/72db4305aa1d4756a1d21ef1833dbd1e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/72db4305aa1d4756a1d21ef1833dbd1e 2024-12-06T10:18:58,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/72db4305aa1d4756a1d21ef1833dbd1e, entries=150, sequenceid=78, 
filesize=11.7 K 2024-12-06T10:18:58,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f730a319a48e4ea1a3a714e975328190 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f730a319a48e4ea1a3a714e975328190 2024-12-06T10:18:58,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f730a319a48e4ea1a3a714e975328190, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T10:18:58,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fa004d65282160f629f3eb2a5c9dca1d in 879ms, sequenceid=78, compaction requested=false 2024-12-06T10:18:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:58,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T10:18:58,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:18:58,561 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:18:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:18:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:18:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:18:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:58,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/07d426ba1bd44b2f9cd1e3185c956a7e is 50, key is test_row_0/A:col10/1733480337666/Put/seqid=0 2024-12-06T10:18:58,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742373_1549 (size=12001) 2024-12-06T10:18:58,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:58,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:18:58,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480398819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480398821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480398826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480398827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480398827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T10:18:58,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480398928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480398929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480398933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480398933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:58,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480398934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:58,970 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/07d426ba1bd44b2f9cd1e3185c956a7e 2024-12-06T10:18:58,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/ed07336465164338b0544d78e058876e is 50, key is test_row_0/B:col10/1733480337666/Put/seqid=0 2024-12-06T10:18:58,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742374_1550 (size=12001) 2024-12-06T10:18:59,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480399130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480399130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480399140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480399140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480399140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,383 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/ed07336465164338b0544d78e058876e 2024-12-06T10:18:59,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3508611c5da44ec29d34d92c6e30187c is 50, key is test_row_0/C:col10/1733480337666/Put/seqid=0 2024-12-06T10:18:59,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742375_1551 (size=12001) 2024-12-06T10:18:59,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480399437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480399438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480399445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480399445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480399446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,793 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3508611c5da44ec29d34d92c6e30187c 2024-12-06T10:18:59,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/07d426ba1bd44b2f9cd1e3185c956a7e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/07d426ba1bd44b2f9cd1e3185c956a7e 2024-12-06T10:18:59,801 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/07d426ba1bd44b2f9cd1e3185c956a7e, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:18:59,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/ed07336465164338b0544d78e058876e as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/ed07336465164338b0544d78e058876e 2024-12-06T10:18:59,805 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/ed07336465164338b0544d78e058876e, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:18:59,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3508611c5da44ec29d34d92c6e30187c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3508611c5da44ec29d34d92c6e30187c 2024-12-06T10:18:59,809 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3508611c5da44ec29d34d92c6e30187c, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:18:59,810 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fa004d65282160f629f3eb2a5c9dca1d in 1249ms, sequenceid=91, compaction requested=true 2024-12-06T10:18:59,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:18:59,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
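The RegionTooBusyException entries above are thrown from HRegion.checkResources() once the region's memstore passes its blocking limit, which in HBase is the configured flush size times the block multiplier. A minimal sketch of that arithmetic follows, assuming the test tuned the flush size down to 128 KB and kept the default 4x multiplier so that the product matches the 512.0 K limit quoted in the log; the two property names are standard HBase keys, the concrete values are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed test settings: a deliberately small per-region flush threshold.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // 128 KB (illustrative)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            // 128 KB * 4 = 524288 bytes = 512.0 K, the limit quoted in the exceptions above.
            System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
        }
    }

Writes against the region keep failing fast with RegionTooBusyException until the in-flight flush (the DefaultStoreFlusher/HStore entries around this point) brings the memstore back under that limit.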
2024-12-06T10:18:59,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-06T10:18:59,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-06T10:18:59,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-06T10:18:59,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0140 sec 2024-12-06T10:18:59,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.0180 sec 2024-12-06T10:18:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T10:18:59,901 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-06T10:18:59,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:18:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-06T10:18:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T10:18:59,903 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:18:59,904 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:18:59,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:18:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:18:59,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:18:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:18:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:18:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-06T10:18:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:18:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:18:59,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/92bff4133e2c4828924453d58bbaa7c5 is 50, key is test_row_0/A:col10/1733480338826/Put/seqid=0 2024-12-06T10:18:59,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742376_1552 (size=14341) 2024-12-06T10:18:59,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480399956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480399959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480399960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480399961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:18:59,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:18:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480399962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T10:19:00,056 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:00,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
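The callIds on each client connection keep increasing between the rejected Mutate calls above, i.e. the writers retry after every RegionTooBusyException. As a rough, assumed illustration of what such a writer looks like on the client side (the table name and row mirror the test, but the retry count and backoff are made up, and the stock HBase client also retries internally on its own):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; attempt <= 5; attempt++) {   // bounded retries (assumed)
                    try {
                        table.put(put);                            // may surface a busy-region failure
                        break;                                     // write accepted
                    } catch (IOException busyOrOther) {
                        Thread.sleep(100L * attempt);              // simple linear backoff before retrying
                    }
                }
            }
        }
    }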
2024-12-06T10:19:00,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480400063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480400066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480400067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480400067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480400071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T10:19:00,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:00,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
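Procedure pid=135 above fails with "Unable to complete flush ... as already flushing" because the MemStoreFlusher-triggered flush is still running; the master records the remote failure and re-dispatches the FlushRegionCallable, which is retried again further below. The admin-side request that starts such a FlushTableProcedure is an ordinary table flush call; a minimal sketch, with the connection details assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; as in the log, the master
                // runs this as a FlushTableProcedure with one FlushRegionProcedure per region.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }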
2024-12-06T10:19:00,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480400272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480400272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480400273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480400273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480400274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/92bff4133e2c4828924453d58bbaa7c5 2024-12-06T10:19:00,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/e80c848838ad4af284b6e1d53a60a69a is 50, key is test_row_0/B:col10/1733480338826/Put/seqid=0 2024-12-06T10:19:00,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742377_1553 (size=12001) 2024-12-06T10:19:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T10:19:00,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:00,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
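[Editor's illustrative aside, not part of the captured log] The surrounding entries repeat one pattern: while region fa004d65282160f629f3eb2a5c9dca1d is flushing, client Mutate calls are rejected with RegionTooBusyException ("Over memstore limit=512.0 K") and the master's FlushRegionCallable retries for pid=135 fail with "as already flushing". The sketch below is a minimal, hypothetical client-side retry loop written against the standard HBase Java client API; the table, row, family, and qualifier names are copied from the log above, while the retry count and backoff values are illustrative assumptions, not anything the test itself does.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family, and qualifier taken from the log above ("test_row_0", family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));

      long backoffMs = 100L;                       // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                          // may fail while the region's memstore is over its blocking limit
          return;                                  // write accepted
        } catch (IOException e) {
          // The server-side RegionTooBusyException seen in the log typically surfaces here
          // (possibly wrapped) once the client's own retries are exhausted; back off and retry,
          // since the region usually becomes writable again after the flush completes.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}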
2024-12-06T10:19:00,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480400576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480400576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480400577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480400578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480400579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:00,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,672 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/e80c848838ad4af284b6e1d53a60a69a 2024-12-06T10:19:00,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/aeaab6fa81604ee4bc7777ccc75d3a24 is 50, key is test_row_0/C:col10/1733480338826/Put/seqid=0 2024-12-06T10:19:00,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742378_1554 (size=12001) 2024-12-06T10:19:00,824 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
as already flushing 2024-12-06T10:19:00,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,980 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:00,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:00,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:00,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:00,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:01,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T10:19:01,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:01,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480401078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480401081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480401083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:01,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480401084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480401086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,132 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:01,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:01,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:01,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:01,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:01,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:01,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/aeaab6fa81604ee4bc7777ccc75d3a24 2024-12-06T10:19:01,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/92bff4133e2c4828924453d58bbaa7c5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/92bff4133e2c4828924453d58bbaa7c5 2024-12-06T10:19:01,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/92bff4133e2c4828924453d58bbaa7c5, entries=200, sequenceid=116, filesize=14.0 K 2024-12-06T10:19:01,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/e80c848838ad4af284b6e1d53a60a69a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/e80c848838ad4af284b6e1d53a60a69a 2024-12-06T10:19:01,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/e80c848838ad4af284b6e1d53a60a69a, entries=150, 
sequenceid=116, filesize=11.7 K 2024-12-06T10:19:01,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/aeaab6fa81604ee4bc7777ccc75d3a24 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/aeaab6fa81604ee4bc7777ccc75d3a24 2024-12-06T10:19:01,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/aeaab6fa81604ee4bc7777ccc75d3a24, entries=150, sequenceid=116, filesize=11.7 K 2024-12-06T10:19:01,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for fa004d65282160f629f3eb2a5c9dca1d in 1279ms, sequenceid=116, compaction requested=true 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:01,223 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:01,223 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:01,224 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:01,224 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:01,224 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] 
regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files) 2024-12-06T10:19:01,224 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor compaction (all files) 2024-12-06T10:19:01,224 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:01,224 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:01,225 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/dbb6d000bed740afb91e9e97dd22c13e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/72db4305aa1d4756a1d21ef1833dbd1e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/ed07336465164338b0544d78e058876e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/e80c848838ad4af284b6e1d53a60a69a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=47.0 K 2024-12-06T10:19:01,225 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1b66dbc97bb641bb9413abf67d2ed65d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/4a60b7efd80744a3b21e24a48bcfb2fc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/07d426ba1bd44b2f9cd1e3185c956a7e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/92bff4133e2c4828924453d58bbaa7c5] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=51.5 K 2024-12-06T10:19:01,225 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting dbb6d000bed740afb91e9e97dd22c13e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480334834 2024-12-06T10:19:01,225 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 72db4305aa1d4756a1d21ef1833dbd1e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480336991 2024-12-06T10:19:01,225 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b66dbc97bb641bb9413abf67d2ed65d, keycount=150, bloomtype=ROW, 
size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480334834 2024-12-06T10:19:01,225 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a60b7efd80744a3b21e24a48bcfb2fc, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480336991 2024-12-06T10:19:01,225 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ed07336465164338b0544d78e058876e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480337656 2024-12-06T10:19:01,226 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e80c848838ad4af284b6e1d53a60a69a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733480338825 2024-12-06T10:19:01,226 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07d426ba1bd44b2f9cd1e3185c956a7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480337656 2024-12-06T10:19:01,226 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92bff4133e2c4828924453d58bbaa7c5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733480338816 2024-12-06T10:19:01,235 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#467 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:01,236 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/92b2b77b87cb4b248d55da41c331e1d3 is 50, key is test_row_0/B:col10/1733480338826/Put/seqid=0 2024-12-06T10:19:01,242 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:01,243 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/6eab958228124930a5383b219bfe0321 is 50, key is test_row_0/A:col10/1733480338826/Put/seqid=0 2024-12-06T10:19:01,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742379_1555 (size=12241) 2024-12-06T10:19:01,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742380_1556 (size=12241) 2024-12-06T10:19:01,285 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:01,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:01,286 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:01,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:01,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/33edb85437004723baa4de97de365b34 is 50, key is test_row_0/A:col10/1733480339961/Put/seqid=0 2024-12-06T10:19:01,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742381_1557 
(size=12001) 2024-12-06T10:19:01,655 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/92b2b77b87cb4b248d55da41c331e1d3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/92b2b77b87cb4b248d55da41c331e1d3 2024-12-06T10:19:01,661 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into 92b2b77b87cb4b248d55da41c331e1d3(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:01,661 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:01,661 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=12, startTime=1733480341223; duration=0sec 2024-12-06T10:19:01,661 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:01,661 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:19:01,661 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:01,662 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/6eab958228124930a5383b219bfe0321 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/6eab958228124930a5383b219bfe0321 2024-12-06T10:19:01,662 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:01,662 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:19:01,663 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:01,663 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1e435e337b394cc995635d67f8a264c1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f730a319a48e4ea1a3a714e975328190, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3508611c5da44ec29d34d92c6e30187c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/aeaab6fa81604ee4bc7777ccc75d3a24] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=47.0 K 2024-12-06T10:19:01,664 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e435e337b394cc995635d67f8a264c1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733480334834 2024-12-06T10:19:01,668 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f730a319a48e4ea1a3a714e975328190, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480336991 2024-12-06T10:19:01,672 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3508611c5da44ec29d34d92c6e30187c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480337656 2024-12-06T10:19:01,673 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting aeaab6fa81604ee4bc7777ccc75d3a24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733480338825 2024-12-06T10:19:01,677 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into 6eab958228124930a5383b219bfe0321(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:01,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:01,677 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=12, startTime=1733480341223; duration=0sec 2024-12-06T10:19:01,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:01,677 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:19:01,685 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#470 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:01,685 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/1bb19bc19e4943a1845c2f044b9b628b is 50, key is test_row_0/C:col10/1733480338826/Put/seqid=0 2024-12-06T10:19:01,696 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/33edb85437004723baa4de97de365b34 2024-12-06T10:19:01,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742382_1558 (size=12241) 2024-12-06T10:19:01,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/0d3c0a9a59214a299be1b5d876c6861b is 50, key is test_row_0/B:col10/1733480339961/Put/seqid=0 2024-12-06T10:19:01,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742383_1559 (size=12001) 2024-12-06T10:19:01,726 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/0d3c0a9a59214a299be1b5d876c6861b 2024-12-06T10:19:01,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/967785be353b48d0b1a0ff9c1877fdf3 is 50, key is test_row_0/C:col10/1733480339961/Put/seqid=0 2024-12-06T10:19:01,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742384_1560 (size=12001) 2024-12-06T10:19:01,766 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/967785be353b48d0b1a0ff9c1877fdf3 2024-12-06T10:19:01,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/33edb85437004723baa4de97de365b34 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/33edb85437004723baa4de97de365b34 2024-12-06T10:19:01,777 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/33edb85437004723baa4de97de365b34, entries=150, sequenceid=127, filesize=11.7 K 2024-12-06T10:19:01,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/0d3c0a9a59214a299be1b5d876c6861b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0d3c0a9a59214a299be1b5d876c6861b 2024-12-06T10:19:01,782 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0d3c0a9a59214a299be1b5d876c6861b, entries=150, sequenceid=127, filesize=11.7 K 2024-12-06T10:19:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/967785be353b48d0b1a0ff9c1877fdf3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/967785be353b48d0b1a0ff9c1877fdf3 2024-12-06T10:19:01,785 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/967785be353b48d0b1a0ff9c1877fdf3, entries=150, sequenceid=127, filesize=11.7 K 2024-12-06T10:19:01,786 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for fa004d65282160f629f3eb2a5c9dca1d in 500ms, sequenceid=127, compaction requested=false 2024-12-06T10:19:01,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:01,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:01,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-06T10:19:01,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-06T10:19:01,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-06T10:19:01,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8830 sec 2024-12-06T10:19:01,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.8870 sec 2024-12-06T10:19:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T10:19:02,008 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-06T10:19:02,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:02,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-06T10:19:02,010 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:02,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T10:19:02,011 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:02,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, 
ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:02,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:02,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:19:02,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:02,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:02,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:02,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:02,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:02,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:02,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/876c3e48ec8a49e29f2c5991165d78e2 is 50, key is test_row_0/A:col10/1733480342099/Put/seqid=0 2024-12-06T10:19:02,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742385_1561 (size=16927) 2024-12-06T10:19:02,111 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/1bb19bc19e4943a1845c2f044b9b628b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1bb19bc19e4943a1845c2f044b9b628b 2024-12-06T10:19:02,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T10:19:02,115 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 1bb19bc19e4943a1845c2f044b9b628b(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:02,115 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:02,115 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=12, startTime=1733480341223; duration=0sec 2024-12-06T10:19:02,115 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:02,115 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:19:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480402125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480402125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480402129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480402130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480402131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,163 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480402232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480402232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480402234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480402237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480402238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T10:19:02,316 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:02,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:02,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:02,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480402437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480402437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480402441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480402442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480402443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,469 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:02,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:02,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,470 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:02,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/876c3e48ec8a49e29f2c5991165d78e2 2024-12-06T10:19:02,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/d76ac71130cd4ff18801e32e486c9c2b is 50, key is test_row_0/B:col10/1733480342099/Put/seqid=0 2024-12-06T10:19:02,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742386_1562 (size=9757) 2024-12-06T10:19:02,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T10:19:02,622 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480402743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480402743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480402746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480402749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480402750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,774 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:02,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:02,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:02,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,927 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:02,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:02,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:02,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:02,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:02,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:02,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/d76ac71130cd4ff18801e32e486c9c2b 2024-12-06T10:19:02,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ba0d412334824952b83ab1759fd80f1d is 50, key is test_row_0/C:col10/1733480342099/Put/seqid=0 2024-12-06T10:19:02,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742387_1563 (size=9757) 2024-12-06T10:19:03,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:03,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:03,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:03,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:03,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:03,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T10:19:03,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:03,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:03,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:03,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:03,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:03,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:03,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:03,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480403250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480403251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480403260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480403260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480403260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ba0d412334824952b83ab1759fd80f1d 2024-12-06T10:19:03,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/876c3e48ec8a49e29f2c5991165d78e2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/876c3e48ec8a49e29f2c5991165d78e2 2024-12-06T10:19:03,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/876c3e48ec8a49e29f2c5991165d78e2, entries=250, sequenceid=141, filesize=16.5 K 2024-12-06T10:19:03,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/d76ac71130cd4ff18801e32e486c9c2b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/d76ac71130cd4ff18801e32e486c9c2b 2024-12-06T10:19:03,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/d76ac71130cd4ff18801e32e486c9c2b, entries=100, sequenceid=141, filesize=9.5 K 2024-12-06T10:19:03,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ba0d412334824952b83ab1759fd80f1d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ba0d412334824952b83ab1759fd80f1d 2024-12-06T10:19:03,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ba0d412334824952b83ab1759fd80f1d, entries=100, sequenceid=141, filesize=9.5 K 2024-12-06T10:19:03,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fa004d65282160f629f3eb2a5c9dca1d in 1270ms, sequenceid=141, compaction requested=true 2024-12-06T10:19:03,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:03,370 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:03,371 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:03,371 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files) 2024-12-06T10:19:03,371 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:03,371 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/6eab958228124930a5383b219bfe0321, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/33edb85437004723baa4de97de365b34, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/876c3e48ec8a49e29f2c5991165d78e2] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=40.2 K 2024-12-06T10:19:03,372 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eab958228124930a5383b219bfe0321, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733480338825 2024-12-06T10:19:03,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:03,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:03,372 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:03,372 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33edb85437004723baa4de97de365b34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733480339953 
2024-12-06T10:19:03,372 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 876c3e48ec8a49e29f2c5991165d78e2, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733480342098 2024-12-06T10:19:03,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:03,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:03,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:03,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:03,373 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:03,373 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor compaction (all files) 2024-12-06T10:19:03,373 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:03,373 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/92b2b77b87cb4b248d55da41c331e1d3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0d3c0a9a59214a299be1b5d876c6861b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/d76ac71130cd4ff18801e32e486c9c2b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=33.2 K 2024-12-06T10:19:03,374 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 92b2b77b87cb4b248d55da41c331e1d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733480338825 2024-12-06T10:19:03,374 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d3c0a9a59214a299be1b5d876c6861b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733480339953 2024-12-06T10:19:03,374 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d76ac71130cd4ff18801e32e486c9c2b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733480342099 2024-12-06T10:19:03,381 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#476 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:03,382 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/1d97ff855e6a4cb0bbcdaadae6aedccd is 50, key is test_row_0/A:col10/1733480342099/Put/seqid=0 2024-12-06T10:19:03,385 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:03,386 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/18692387529648b89a4897f4e47b955c is 50, key is test_row_0/B:col10/1733480342099/Put/seqid=0 2024-12-06T10:19:03,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:03,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T10:19:03,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:03,387 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:19:03,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:03,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:03,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:03,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:03,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:03,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:03,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742388_1564 (size=12443) 2024-12-06T10:19:03,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/58a84f83239949f3972a01b499ed0c8a is 50, key is test_row_0/A:col10/1733480342129/Put/seqid=0 2024-12-06T10:19:03,413 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/1d97ff855e6a4cb0bbcdaadae6aedccd as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1d97ff855e6a4cb0bbcdaadae6aedccd 2024-12-06T10:19:03,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742389_1565 (size=12443) 2024-12-06T10:19:03,419 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into 1d97ff855e6a4cb0bbcdaadae6aedccd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:03,419 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:03,419 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=13, startTime=1733480343370; duration=0sec 2024-12-06T10:19:03,419 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:03,419 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:19:03,419 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:03,422 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:03,422 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:19:03,422 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:03,422 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1bb19bc19e4943a1845c2f044b9b628b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/967785be353b48d0b1a0ff9c1877fdf3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ba0d412334824952b83ab1759fd80f1d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=33.2 K 2024-12-06T10:19:03,422 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bb19bc19e4943a1845c2f044b9b628b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733480338825 2024-12-06T10:19:03,423 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 967785be353b48d0b1a0ff9c1877fdf3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733480339953 2024-12-06T10:19:03,423 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba0d412334824952b83ab1759fd80f1d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733480342099 2024-12-06T10:19:03,425 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/18692387529648b89a4897f4e47b955c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/18692387529648b89a4897f4e47b955c 2024-12-06T10:19:03,434 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into 18692387529648b89a4897f4e47b955c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:03,434 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:03,434 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=13, startTime=1733480343372; duration=0sec 2024-12-06T10:19:03,434 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:03,434 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:19:03,442 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#479 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:03,442 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/b634e0edd5c447ac8f91e63cd256eac5 is 50, key is test_row_0/C:col10/1733480342099/Put/seqid=0 2024-12-06T10:19:03,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742390_1566 (size=12151) 2024-12-06T10:19:03,446 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/58a84f83239949f3972a01b499ed0c8a 2024-12-06T10:19:03,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/0a7699cd9c98473383f54b9b57d28ddf is 50, key is test_row_0/B:col10/1733480342129/Put/seqid=0 2024-12-06T10:19:03,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742391_1567 (size=12443) 2024-12-06T10:19:03,492 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/b634e0edd5c447ac8f91e63cd256eac5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/b634e0edd5c447ac8f91e63cd256eac5 2024-12-06T10:19:03,497 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 
b634e0edd5c447ac8f91e63cd256eac5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:03,497 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:03,497 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=13, startTime=1733480343373; duration=0sec 2024-12-06T10:19:03,497 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:03,497 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:19:03,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742392_1568 (size=12151) 2024-12-06T10:19:03,501 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/0a7699cd9c98473383f54b9b57d28ddf 2024-12-06T10:19:03,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/48b76e4bc7fe4f638a7dc12b1d73766f is 50, key is test_row_0/C:col10/1733480342129/Put/seqid=0 2024-12-06T10:19:03,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742393_1569 (size=12151) 2024-12-06T10:19:03,519 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/48b76e4bc7fe4f638a7dc12b1d73766f 2024-12-06T10:19:03,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/58a84f83239949f3972a01b499ed0c8a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/58a84f83239949f3972a01b499ed0c8a 2024-12-06T10:19:03,529 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/58a84f83239949f3972a01b499ed0c8a, entries=150, sequenceid=166, filesize=11.9 K 2024-12-06T10:19:03,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/0a7699cd9c98473383f54b9b57d28ddf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0a7699cd9c98473383f54b9b57d28ddf 2024-12-06T10:19:03,534 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0a7699cd9c98473383f54b9b57d28ddf, entries=150, sequenceid=166, filesize=11.9 K 2024-12-06T10:19:03,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/48b76e4bc7fe4f638a7dc12b1d73766f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/48b76e4bc7fe4f638a7dc12b1d73766f 2024-12-06T10:19:03,543 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/48b76e4bc7fe4f638a7dc12b1d73766f, entries=150, sequenceid=166, filesize=11.9 K 2024-12-06T10:19:03,545 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for fa004d65282160f629f3eb2a5c9dca1d in 158ms, sequenceid=166, compaction requested=false 2024-12-06T10:19:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-06T10:19:03,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-06T10:19:03,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-06T10:19:03,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5350 sec 2024-12-06T10:19:03,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.5400 sec 2024-12-06T10:19:04,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T10:19:04,115 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-06T10:19:04,116 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:04,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-06T10:19:04,118 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:04,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:04,119 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:04,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:04,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:04,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:04,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:19:04,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:04,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:04,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:04,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:04,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:04,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:04,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:04,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:04,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:04,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/9d28ebac95f64c3998f275f2d64ded22 is 50, key is test_row_0/A:col10/1733480344269/Put/seqid=0 2024-12-06T10:19:04,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742394_1570 (size=16931) 2024-12-06T10:19:04,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/9d28ebac95f64c3998f275f2d64ded22 2024-12-06T10:19:04,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/8892a451950b4217ad3773bb3d40fd16 is 50, key is test_row_0/B:col10/1733480344269/Put/seqid=0 2024-12-06T10:19:04,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742395_1571 (size=12151) 2024-12-06T10:19:04,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/8892a451950b4217ad3773bb3d40fd16 2024-12-06T10:19:04,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ab87017181ea48bda380b3ab3e499571 is 50, key is test_row_0/C:col10/1733480344269/Put/seqid=0 2024-12-06T10:19:04,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742396_1572 (size=12151) 2024-12-06T10:19:04,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ab87017181ea48bda380b3ab3e499571 2024-12-06T10:19:04,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480404305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480404310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/9d28ebac95f64c3998f275f2d64ded22 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9d28ebac95f64c3998f275f2d64ded22 2024-12-06T10:19:04,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480404312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9d28ebac95f64c3998f275f2d64ded22, entries=250, sequenceid=180, filesize=16.5 K 2024-12-06T10:19:04,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/8892a451950b4217ad3773bb3d40fd16 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8892a451950b4217ad3773bb3d40fd16 2024-12-06T10:19:04,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480404312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480404314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8892a451950b4217ad3773bb3d40fd16, entries=150, sequenceid=180, filesize=11.9 K 2024-12-06T10:19:04,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ab87017181ea48bda380b3ab3e499571 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ab87017181ea48bda380b3ab3e499571 2024-12-06T10:19:04,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ab87017181ea48bda380b3ab3e499571, entries=150, sequenceid=180, filesize=11.9 K 2024-12-06T10:19:04,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for fa004d65282160f629f3eb2a5c9dca1d in 59ms, sequenceid=180, compaction requested=true 2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:04,329 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:04,329 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store size is 2 
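[Editor's note] The RegionTooBusyException warnings in this stretch come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (512 K here); the stock HBase client normally treats this as a retriable exception and backs off on its own, which is why the test workload keeps making progress once the flush frees space. The sketch below simply makes that retry pattern explicit for a bare Table.put call; the table, row, family, and qualifier names mirror the log, while the retry count and sleep times are arbitrary.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
  // Hypothetical helper (not part of the test): retry a put a few times when
  // the region reports it is over its memstore limit, sleeping between
  // attempts so the in-progress flush has time to free memstore space.
  static void putWithRetry(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                    // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // simple linear backoff
        }
      }
    }
  }

  public static void main(String[] args) {
    // A put shaped like the ones being rejected in the log
    // (row test_row_0, family A, qualifier col10).
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}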
2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:04,330 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41525 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:04,330 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36745 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:04,330 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor compaction (all files) 2024-12-06T10:19:04,330 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files) 2024-12-06T10:19:04,330 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,330 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:04,330 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/18692387529648b89a4897f4e47b955c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0a7699cd9c98473383f54b9b57d28ddf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8892a451950b4217ad3773bb3d40fd16] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=35.9 K 2024-12-06T10:19:04,330 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1d97ff855e6a4cb0bbcdaadae6aedccd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/58a84f83239949f3972a01b499ed0c8a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9d28ebac95f64c3998f275f2d64ded22] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=40.6 K 2024-12-06T10:19:04,330 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 18692387529648b89a4897f4e47b955c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733480339953 2024-12-06T10:19:04,330 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d97ff855e6a4cb0bbcdaadae6aedccd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733480339953 2024-12-06T10:19:04,331 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58a84f83239949f3972a01b499ed0c8a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733480342123 2024-12-06T10:19:04,331 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a7699cd9c98473383f54b9b57d28ddf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733480342123 2024-12-06T10:19:04,331 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 8892a451950b4217ad3773bb3d40fd16, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733480344264 2024-12-06T10:19:04,331 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d28ebac95f64c3998f275f2d64ded22, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733480344264 2024-12-06T10:19:04,339 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#485 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:04,339 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/a5d29ea9fdc043519bc0abe811215b16 is 50, key is test_row_0/B:col10/1733480344269/Put/seqid=0 2024-12-06T10:19:04,343 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#486 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:04,343 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/60a2d5a4cce74fbba3fab7f61d0a27ec is 50, key is test_row_0/A:col10/1733480344269/Put/seqid=0 2024-12-06T10:19:04,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742397_1573 (size=12595) 2024-12-06T10:19:04,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742398_1574 (size=12595) 2024-12-06T10:19:04,361 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/60a2d5a4cce74fbba3fab7f61d0a27ec as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/60a2d5a4cce74fbba3fab7f61d0a27ec 2024-12-06T10:19:04,368 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into 60a2d5a4cce74fbba3fab7f61d0a27ec(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
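[Editor's note] The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines refer to the exploring policy's ratio test: a candidate set is acceptable only if no single file is larger than the configured compaction ratio (1.2 in stock HBase) times the combined size of the other files in the set. The code below is a simplified, standalone sketch of just that criterion, not HBase's actual ExploringCompactionPolicy, which additionally enforces minimum/maximum file counts and compares permutations before picking one; the sizes in main() are only approximations of the three A-family files above.

import java.util.List;

public class RatioCheck {
  // Simplified version of the "in ratio" test: a candidate set passes only if
  // no file is more than `ratio` times the combined size of the other files.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three A-family store files from the log: ~12.2 K, ~11.9 K, ~16.5 K.
    System.out.println(filesInRatio(List.of(12500L, 12200L, 16900L), 1.2)); // prints true
  }
}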
2024-12-06T10:19:04,368 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:04,368 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=13, startTime=1733480344329; duration=0sec 2024-12-06T10:19:04,368 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:04,368 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:19:04,368 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:04,369 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36745 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:04,369 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:19:04,369 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,369 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/b634e0edd5c447ac8f91e63cd256eac5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/48b76e4bc7fe4f638a7dc12b1d73766f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ab87017181ea48bda380b3ab3e499571] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=35.9 K 2024-12-06T10:19:04,370 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting b634e0edd5c447ac8f91e63cd256eac5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733480339953 2024-12-06T10:19:04,370 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48b76e4bc7fe4f638a7dc12b1d73766f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733480342123 2024-12-06T10:19:04,370 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab87017181ea48bda380b3ab3e499571, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733480344264 2024-12-06T10:19:04,378 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:04,379 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/6485528518224d73b1fbddc371c4856b is 50, key is test_row_0/C:col10/1733480344269/Put/seqid=0 2024-12-06T10:19:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742399_1575 (size=12595) 2024-12-06T10:19:04,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:04,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:04,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-06T10:19:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:04,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:04,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:04,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:04,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
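[Editor's note] The 512 K figure in the RegionTooBusyException messages is the region's blocking memstore size, which HBase computes as the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; this test presumably runs with a deliberately small flush size so the limit is hit quickly and flush/compaction behaviour can be exercised. The values below are hypothetical, chosen only so that the product matches the 512 K seen in the log (stock defaults are 128 MB and 4).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings: the blocking limit that triggers
    // RegionTooBusyException is flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 * 128 K = 512 K
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288
  }
}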
2024-12-06T10:19:04,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/156cba7e40574305a18c45c0f18f3b0c is 50, key is test_row_0/A:col10/1733480344419/Put/seqid=0 2024-12-06T10:19:04,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742400_1576 (size=16931) 2024-12-06T10:19:04,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480404428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480404430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480404431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480404431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480404433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480404534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480404534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480404538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480404538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480404539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,579 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:04,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:04,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:04,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:04,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:04,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480404740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480404740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480404743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480404744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:04,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480404744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,760 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/a5d29ea9fdc043519bc0abe811215b16 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a5d29ea9fdc043519bc0abe811215b16 2024-12-06T10:19:04,765 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into a5d29ea9fdc043519bc0abe811215b16(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
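Interleaved with the compaction records above, the RPC handlers keep rejecting client Mutate calls with RegionTooBusyException while the region's memstore sits over its blocking limit. The stock HBase client normally retries these rejections itself within its operation timeout; the sketch below only makes that handling explicit and is an illustrative assumption rather than code from this test: the table, row, family and qualifier names are copied from the log, while the value, the 5-attempt cap and the linear backoff are invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: retry a Put that the server rejects with RegionTooBusyException
    // ("Over memstore limit") while the region drains its memstore.
    public class RetryingPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // family "A" and qualifier "col10" appear in the log; the value is a placeholder
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);               // rejected while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                    // give up after a few attempts (assumed policy)
              }
              Thread.sleep(200L * attempt); // simple linear backoff while the flush catches up
            }
          }
        }
      }
    }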
2024-12-06T10:19:04,765 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:04,765 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=13, startTime=1733480344329; duration=0sec 2024-12-06T10:19:04,765 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:04,765 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:19:04,798 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/6485528518224d73b1fbddc371c4856b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/6485528518224d73b1fbddc371c4856b 2024-12-06T10:19:04,802 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 6485528518224d73b1fbddc371c4856b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:04,802 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:04,802 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=13, startTime=1733480344329; duration=0sec 2024-12-06T10:19:04,802 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:04,802 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:19:04,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/156cba7e40574305a18c45c0f18f3b0c 2024-12-06T10:19:04,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/c10a8b2927074400bb1373e1fb053cea is 50, key is test_row_0/B:col10/1733480344419/Put/seqid=0 2024-12-06T10:19:04,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742401_1577 (size=12151) 
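The MemStoreFlusher records above show the blocked region finally being flushed store by store (store A already written to .tmp/A, store B's file being built), which is what eventually relieves the "Over memstore limit=512.0 K" rejections: HRegion.checkResources blocks writes once the region memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The configuration sketch below is an assumption, not this test's actual setup; the 128 KB flush size and 4x multiplier are chosen only so their product matches the 512 K limit printed in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of a configuration that would yield the 512 K blocking limit seen above.
    public class MemstoreLimitSketch {
      public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush each region at 128 KB (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block puts at 4 x 128 KB = 512 K
        return conf;
      }
    }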
2024-12-06T10:19:04,884 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:04,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:04,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:04,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:04,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:04,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
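The pid=139 entries keep repeating because the region server answers each dispatch of the FlushRegionCallable with "NOT flushing ... as already flushing", the callable fails with IOException, and the master re-dispatches the procedure until the in-flight flush completes. A flush like this is normally requested through the Admin API; the sketch below is illustrative only (connection setup omitted, table name taken from the log), and the log does not show exactly how this test triggers it.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Sketch: ask the cluster to flush every region of the table, the kind of
    // request that ends up running as a FlushRegionCallable on the region server.
    public class FlushRequestSketch {
      static void flushTable(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }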
2024-12-06T10:19:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,036 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:05,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
as already flushing 2024-12-06T10:19:05,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480405046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480405046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480405047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480405048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480405049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,189 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:05,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:05,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:05,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/c10a8b2927074400bb1373e1fb053cea 2024-12-06T10:19:05,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3f5fe7ff58aa48d4a40c068f838492ca is 50, key is test_row_0/C:col10/1733480344419/Put/seqid=0 2024-12-06T10:19:05,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742402_1578 (size=12151) 2024-12-06T10:19:05,341 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:05,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
as already flushing 2024-12-06T10:19:05,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,494 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:05,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:05,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480405551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480405551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480405551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480405553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480405555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,646 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:05,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:05,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:05,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:05,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3f5fe7ff58aa48d4a40c068f838492ca 2024-12-06T10:19:05,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/156cba7e40574305a18c45c0f18f3b0c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/156cba7e40574305a18c45c0f18f3b0c 2024-12-06T10:19:05,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/156cba7e40574305a18c45c0f18f3b0c, entries=250, sequenceid=208, filesize=16.5 K 2024-12-06T10:19:05,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/c10a8b2927074400bb1373e1fb053cea as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/c10a8b2927074400bb1373e1fb053cea 2024-12-06T10:19:05,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/c10a8b2927074400bb1373e1fb053cea, entries=150, 
sequenceid=208, filesize=11.9 K 2024-12-06T10:19:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3f5fe7ff58aa48d4a40c068f838492ca as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f5fe7ff58aa48d4a40c068f838492ca 2024-12-06T10:19:05,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f5fe7ff58aa48d4a40c068f838492ca, entries=150, sequenceid=208, filesize=11.9 K 2024-12-06T10:19:05,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for fa004d65282160f629f3eb2a5c9dca1d in 1247ms, sequenceid=208, compaction requested=false 2024-12-06T10:19:05,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:05,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:05,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T10:19:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:05,800 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-06T10:19:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:05,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:05,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/adc38d33d639496a80e29e668623af2f is 50, key is test_row_0/A:col10/1733480344426/Put/seqid=0 2024-12-06T10:19:05,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742403_1579 (size=12151) 2024-12-06T10:19:05,811 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/adc38d33d639496a80e29e668623af2f 2024-12-06T10:19:05,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/096b06f5d8d147edaab606543bca35b6 is 50, key is test_row_0/B:col10/1733480344426/Put/seqid=0 2024-12-06T10:19:05,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742404_1580 (size=12151) 2024-12-06T10:19:06,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:06,224 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=219 
(bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/096b06f5d8d147edaab606543bca35b6 2024-12-06T10:19:06,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f077d6017e764d4997eabb08d666a9d7 is 50, key is test_row_0/C:col10/1733480344426/Put/seqid=0 2024-12-06T10:19:06,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742405_1581 (size=12151) 2024-12-06T10:19:06,236 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f077d6017e764d4997eabb08d666a9d7 2024-12-06T10:19:06,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/adc38d33d639496a80e29e668623af2f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/adc38d33d639496a80e29e668623af2f 2024-12-06T10:19:06,243 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/adc38d33d639496a80e29e668623af2f, entries=150, sequenceid=219, filesize=11.9 K 2024-12-06T10:19:06,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/096b06f5d8d147edaab606543bca35b6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/096b06f5d8d147edaab606543bca35b6 2024-12-06T10:19:06,247 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/096b06f5d8d147edaab606543bca35b6, entries=150, sequenceid=219, filesize=11.9 K 2024-12-06T10:19:06,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f077d6017e764d4997eabb08d666a9d7 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f077d6017e764d4997eabb08d666a9d7 2024-12-06T10:19:06,251 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f077d6017e764d4997eabb08d666a9d7, entries=150, sequenceid=219, filesize=11.9 K 2024-12-06T10:19:06,251 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for fa004d65282160f629f3eb2a5c9dca1d in 451ms, sequenceid=219, compaction requested=true 2024-12-06T10:19:06,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:06,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:06,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-06T10:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-06T10:19:06,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-06T10:19:06,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1340 sec 2024-12-06T10:19:06,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.1380 sec 2024-12-06T10:19:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:06,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T10:19:06,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:06,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:06,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:06,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:06,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:06,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:06,573 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/dbc05e765bd04f158a3d1876604cc91e is 50, key is test_row_0/A:col10/1733480346565/Put/seqid=0 2024-12-06T10:19:06,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742406_1582 (size=16931) 2024-12-06T10:19:06,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480406600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480406601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480406608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480406609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480406609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480406711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480406711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480406714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480406716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480406716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480406916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480406917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480406919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480406919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:06,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480406921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:06,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/dbc05e765bd04f158a3d1876604cc91e 2024-12-06T10:19:06,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/8b60835b913e49c98cf75cf2db1e996c is 50, key is test_row_0/B:col10/1733480346565/Put/seqid=0 2024-12-06T10:19:06,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742407_1583 (size=12151) 2024-12-06T10:19:07,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480407222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480407223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480407227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480407228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480407229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/8b60835b913e49c98cf75cf2db1e996c 2024-12-06T10:19:07,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/62d6a3bcf514484787d984efc0c74d09 is 50, key is test_row_0/C:col10/1733480346565/Put/seqid=0 2024-12-06T10:19:07,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742408_1584 (size=12151) 2024-12-06T10:19:07,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480407730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480407731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480407732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480407734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:07,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480407737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:07,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/62d6a3bcf514484787d984efc0c74d09 2024-12-06T10:19:07,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/dbc05e765bd04f158a3d1876604cc91e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/dbc05e765bd04f158a3d1876604cc91e 2024-12-06T10:19:07,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/dbc05e765bd04f158a3d1876604cc91e, entries=250, sequenceid=230, filesize=16.5 K 2024-12-06T10:19:07,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/8b60835b913e49c98cf75cf2db1e996c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8b60835b913e49c98cf75cf2db1e996c 2024-12-06T10:19:07,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8b60835b913e49c98cf75cf2db1e996c, entries=150, sequenceid=230, filesize=11.9 K 2024-12-06T10:19:07,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/62d6a3bcf514484787d984efc0c74d09 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/62d6a3bcf514484787d984efc0c74d09 2024-12-06T10:19:07,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/62d6a3bcf514484787d984efc0c74d09, entries=150, sequenceid=230, filesize=11.9 K 2024-12-06T10:19:07,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for fa004d65282160f629f3eb2a5c9dca1d in 1260ms, sequenceid=230, compaction requested=true 2024-12-06T10:19:07,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:07,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:07,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:07,829 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:07,829 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:07,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:07,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:07,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:07,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:07,832 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:07,832 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor 
compaction (all files) 2024-12-06T10:19:07,832 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:07,832 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a5d29ea9fdc043519bc0abe811215b16, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/c10a8b2927074400bb1373e1fb053cea, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/096b06f5d8d147edaab606543bca35b6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8b60835b913e49c98cf75cf2db1e996c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=47.9 K 2024-12-06T10:19:07,833 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a5d29ea9fdc043519bc0abe811215b16, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733480344264 2024-12-06T10:19:07,833 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 58608 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:07,833 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files) 2024-12-06T10:19:07,833 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:07,833 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/60a2d5a4cce74fbba3fab7f61d0a27ec, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/156cba7e40574305a18c45c0f18f3b0c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/adc38d33d639496a80e29e668623af2f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/dbc05e765bd04f158a3d1876604cc91e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=57.2 K 2024-12-06T10:19:07,833 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting c10a8b2927074400bb1373e1fb053cea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733480344311 2024-12-06T10:19:07,833 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60a2d5a4cce74fbba3fab7f61d0a27ec, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733480344264 2024-12-06T10:19:07,834 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 096b06f5d8d147edaab606543bca35b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480344426 2024-12-06T10:19:07,834 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 156cba7e40574305a18c45c0f18f3b0c, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733480344310 2024-12-06T10:19:07,834 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b60835b913e49c98cf75cf2db1e996c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733480346565 2024-12-06T10:19:07,834 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting adc38d33d639496a80e29e668623af2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733480344426 2024-12-06T10:19:07,834 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbc05e765bd04f158a3d1876604cc91e, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733480346562 2024-12-06T10:19:07,856 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#497 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:07,856 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/36232e0d12b24de6bb42306212b283ab is 50, key is test_row_0/B:col10/1733480346565/Put/seqid=0 2024-12-06T10:19:07,878 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#498 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:07,879 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/31f422ae20374a96a94b697d7bb76e4c is 50, key is test_row_0/A:col10/1733480346565/Put/seqid=0 2024-12-06T10:19:07,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742409_1585 (size=12731) 2024-12-06T10:19:07,889 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/36232e0d12b24de6bb42306212b283ab as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/36232e0d12b24de6bb42306212b283ab 2024-12-06T10:19:07,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742410_1586 (size=12731) 2024-12-06T10:19:07,894 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into 36232e0d12b24de6bb42306212b283ab(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:07,894 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:07,894 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=12, startTime=1733480347829; duration=0sec 2024-12-06T10:19:07,894 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:07,894 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:19:07,894 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:07,895 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:07,895 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:19:07,895 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:07,896 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/6485528518224d73b1fbddc371c4856b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f5fe7ff58aa48d4a40c068f838492ca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f077d6017e764d4997eabb08d666a9d7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/62d6a3bcf514484787d984efc0c74d09] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=47.9 K 2024-12-06T10:19:07,896 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 6485528518224d73b1fbddc371c4856b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733480344264 2024-12-06T10:19:07,898 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f5fe7ff58aa48d4a40c068f838492ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733480344311 2024-12-06T10:19:07,898 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f077d6017e764d4997eabb08d666a9d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=219, earliestPutTs=1733480344426 2024-12-06T10:19:07,898 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 62d6a3bcf514484787d984efc0c74d09, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733480346565 2024-12-06T10:19:07,898 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/31f422ae20374a96a94b697d7bb76e4c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/31f422ae20374a96a94b697d7bb76e4c 2024-12-06T10:19:07,903 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into 31f422ae20374a96a94b697d7bb76e4c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:07,903 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:07,903 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=12, startTime=1733480347829; duration=0sec 2024-12-06T10:19:07,903 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:07,903 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:19:07,910 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#499 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:07,911 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3f86e39071fc4108bc895f65cbc1498e is 50, key is test_row_0/C:col10/1733480346565/Put/seqid=0 2024-12-06T10:19:07,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742411_1587 (size=12731) 2024-12-06T10:19:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T10:19:08,224 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-06T10:19:08,226 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-06T10:19:08,228 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T10:19:08,228 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:08,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T10:19:08,344 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3f86e39071fc4108bc895f65cbc1498e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f86e39071fc4108bc895f65cbc1498e 2024-12-06T10:19:08,348 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 3f86e39071fc4108bc895f65cbc1498e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:08,348 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:08,348 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=12, startTime=1733480347830; duration=0sec 2024-12-06T10:19:08,348 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:08,348 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:19:08,379 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T10:19:08,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:08,380 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T10:19:08,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:08,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:08,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:08,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:08,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:08,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:08,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/f2c4d3cf011946ed89b46f75da3e70f8 is 50, key is test_row_0/A:col10/1733480346607/Put/seqid=0 2024-12-06T10:19:08,388 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742412_1588 (size=12151) 2024-12-06T10:19:08,388 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/f2c4d3cf011946ed89b46f75da3e70f8 2024-12-06T10:19:08,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/bdaed48f5fd24b7fb5cda1797d639af8 is 50, key is test_row_0/B:col10/1733480346607/Put/seqid=0 2024-12-06T10:19:08,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742413_1589 (size=12151) 2024-12-06T10:19:08,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T10:19:08,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:08,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:08,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480408749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480408749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480408750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480408751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480408751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,801 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/bdaed48f5fd24b7fb5cda1797d639af8 2024-12-06T10:19:08,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/5f10af9c04464dea9142e8b896636dfa is 50, key is test_row_0/C:col10/1733480346607/Put/seqid=0 2024-12-06T10:19:08,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742414_1590 (size=12151) 2024-12-06T10:19:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T10:19:08,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480408854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480408855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480408855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:08,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480408859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480409060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480409060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480409060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480409065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,212 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/5f10af9c04464dea9142e8b896636dfa 2024-12-06T10:19:09,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/f2c4d3cf011946ed89b46f75da3e70f8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/f2c4d3cf011946ed89b46f75da3e70f8 2024-12-06T10:19:09,219 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/f2c4d3cf011946ed89b46f75da3e70f8, entries=150, sequenceid=258, filesize=11.9 K 2024-12-06T10:19:09,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/bdaed48f5fd24b7fb5cda1797d639af8 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/bdaed48f5fd24b7fb5cda1797d639af8 2024-12-06T10:19:09,223 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/bdaed48f5fd24b7fb5cda1797d639af8, entries=150, sequenceid=258, filesize=11.9 K 2024-12-06T10:19:09,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/5f10af9c04464dea9142e8b896636dfa as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5f10af9c04464dea9142e8b896636dfa 2024-12-06T10:19:09,226 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5f10af9c04464dea9142e8b896636dfa, entries=150, sequenceid=258, filesize=11.9 K 2024-12-06T10:19:09,227 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for fa004d65282160f629f3eb2a5c9dca1d in 848ms, sequenceid=258, compaction requested=false 2024-12-06T10:19:09,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:09,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:09,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-06T10:19:09,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-06T10:19:09,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-06T10:19:09,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0000 sec 2024-12-06T10:19:09,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.0030 sec 2024-12-06T10:19:09,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T10:19:09,331 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-06T10:19:09,332 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:09,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-06T10:19:09,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T10:19:09,333 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:09,334 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:09,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:09,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:09,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:19:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-06T10:19:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:09,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/ccc406ad963f4dca98898037e8a8b220 is 50, key is test_row_0/A:col10/1733480348749/Put/seqid=0 2024-12-06T10:19:09,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742415_1591 (size=12301) 2024-12-06T10:19:09,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480409403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480409406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480409407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480409408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T10:19:09,487 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T10:19:09,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:09,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480409512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480409512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480409514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480409514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T10:19:09,639 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T10:19:09,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:09,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:09,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480409720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480409720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480409720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:09,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480409720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/ccc406ad963f4dca98898037e8a8b220 2024-12-06T10:19:09,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/6db3f4694bcb4231924e1212a8d985e8 is 50, key is test_row_0/B:col10/1733480348749/Put/seqid=0 2024-12-06T10:19:09,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742416_1592 (size=12301) 2024-12-06T10:19:09,792 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T10:19:09,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:09,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:09,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T10:19:09,944 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:09,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T10:19:09,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:09,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:10,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480410022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480410027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480410027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480410028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,097 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T10:19:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:10,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:10,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:10,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/6db3f4694bcb4231924e1212a8d985e8 2024-12-06T10:19:10,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ff4e0554cb964672b797c1794dba395d is 50, key is test_row_0/C:col10/1733480348749/Put/seqid=0 2024-12-06T10:19:10,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742417_1593 (size=12301) 2024-12-06T10:19:10,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ff4e0554cb964672b797c1794dba395d 2024-12-06T10:19:10,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/ccc406ad963f4dca98898037e8a8b220 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/ccc406ad963f4dca98898037e8a8b220 2024-12-06T10:19:10,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/ccc406ad963f4dca98898037e8a8b220, entries=150, sequenceid=270, filesize=12.0 K 2024-12-06T10:19:10,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/6db3f4694bcb4231924e1212a8d985e8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/6db3f4694bcb4231924e1212a8d985e8 2024-12-06T10:19:10,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/6db3f4694bcb4231924e1212a8d985e8, entries=150, sequenceid=270, filesize=12.0 K 2024-12-06T10:19:10,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/ff4e0554cb964672b797c1794dba395d as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ff4e0554cb964672b797c1794dba395d 2024-12-06T10:19:10,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ff4e0554cb964672b797c1794dba395d, entries=150, sequenceid=270, filesize=12.0 K 2024-12-06T10:19:10,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fa004d65282160f629f3eb2a5c9dca1d in 876ms, sequenceid=270, compaction requested=true 2024-12-06T10:19:10,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:10,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:10,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:10,245 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:10,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:10,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:10,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:10,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T10:19:10,245 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files) 2024-12-06T10:19:10,246 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:10,246 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:10,246 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor compaction (all files) 2024-12-06T10:19:10,246 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:10,246 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/31f422ae20374a96a94b697d7bb76e4c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/f2c4d3cf011946ed89b46f75da3e70f8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/ccc406ad963f4dca98898037e8a8b220] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=36.3 K 2024-12-06T10:19:10,246 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/36232e0d12b24de6bb42306212b283ab, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/bdaed48f5fd24b7fb5cda1797d639af8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/6db3f4694bcb4231924e1212a8d985e8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=36.3 K 2024-12-06T10:19:10,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31f422ae20374a96a94b697d7bb76e4c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733480346565 2024-12-06T10:19:10,246 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 36232e0d12b24de6bb42306212b283ab, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733480346565 2024-12-06T10:19:10,247 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2c4d3cf011946ed89b46f75da3e70f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733480346598 2024-12-06T10:19:10,247 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting bdaed48f5fd24b7fb5cda1797d639af8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733480346598 2024-12-06T10:19:10,247 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 6db3f4694bcb4231924e1212a8d985e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733480348749 2024-12-06T10:19:10,247 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccc406ad963f4dca98898037e8a8b220, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733480348749 2024-12-06T10:19:10,249 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:10,251 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:10,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/c234e6c2a19c4223830f8ce5621ef419 is 50, key is test_row_0/A:col10/1733480349407/Put/seqid=0 2024-12-06T10:19:10,274 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#507 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:10,274 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/7039f35d1a444aa19b21d62acc5ef5b0 is 50, key is test_row_0/B:col10/1733480348749/Put/seqid=0 2024-12-06T10:19:10,277 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#508 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:10,277 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/e3c037045dfd4b5b8a6f7029565c6f2d is 50, key is test_row_0/A:col10/1733480348749/Put/seqid=0 2024-12-06T10:19:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742418_1594 (size=12301) 2024-12-06T10:19:10,293 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/c234e6c2a19c4223830f8ce5621ef419 2024-12-06T10:19:10,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742419_1595 (size=12983) 2024-12-06T10:19:10,314 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/7039f35d1a444aa19b21d62acc5ef5b0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/7039f35d1a444aa19b21d62acc5ef5b0 2024-12-06T10:19:10,320 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into 7039f35d1a444aa19b21d62acc5ef5b0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:10,320 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:10,320 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=13, startTime=1733480350245; duration=0sec 2024-12-06T10:19:10,320 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:10,320 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:19:10,320 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:10,321 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:10,321 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:19:10,321 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:10,321 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f86e39071fc4108bc895f65cbc1498e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5f10af9c04464dea9142e8b896636dfa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ff4e0554cb964672b797c1794dba395d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=36.3 K 2024-12-06T10:19:10,321 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f86e39071fc4108bc895f65cbc1498e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733480346565 2024-12-06T10:19:10,322 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f10af9c04464dea9142e8b896636dfa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733480346598 2024-12-06T10:19:10,322 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ff4e0554cb964672b797c1794dba395d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733480348749 2024-12-06T10:19:10,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/a8a9db3bff954ae9a20104fdc881bab4 is 50, key is test_row_0/B:col10/1733480349407/Put/seqid=0 2024-12-06T10:19:10,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742420_1596 (size=12983) 2024-12-06T10:19:10,340 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/e3c037045dfd4b5b8a6f7029565c6f2d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/e3c037045dfd4b5b8a6f7029565c6f2d 2024-12-06T10:19:10,345 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into e3c037045dfd4b5b8a6f7029565c6f2d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:10,346 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:10,346 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:10,346 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=13, startTime=1733480350244; duration=0sec 2024-12-06T10:19:10,346 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:10,346 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:19:10,346 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/5bbefa46033742a6a741e85bafb7005e is 50, key is test_row_0/C:col10/1733480348749/Put/seqid=0 2024-12-06T10:19:10,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742422_1598 (size=12983) 2024-12-06T10:19:10,361 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/5bbefa46033742a6a741e85bafb7005e as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5bbefa46033742a6a741e85bafb7005e 2024-12-06T10:19:10,368 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 5bbefa46033742a6a741e85bafb7005e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:10,368 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:10,368 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=13, startTime=1733480350245; duration=0sec 2024-12-06T10:19:10,368 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:10,368 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:19:10,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742421_1597 (size=12301) 2024-12-06T10:19:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T10:19:10,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:10,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:10,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480410541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480410540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480410542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480410546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480410647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480410647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480410647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480410652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1733480410755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,759 DEBUG [Thread-2340 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., hostname=552d6a33fa09,33397,1733480204743, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T10:19:10,773 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/a8a9db3bff954ae9a20104fdc881bab4 2024-12-06T10:19:10,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3ffd5073e7dc4242b1780dc0d51c7769 is 50, key is test_row_0/C:col10/1733480349407/Put/seqid=0 2024-12-06T10:19:10,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742423_1599 (size=12301) 2024-12-06T10:19:10,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480410852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480410852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480410853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:10,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:10,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480410859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480411157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480411158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480411159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480411165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,185 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3ffd5073e7dc4242b1780dc0d51c7769 2024-12-06T10:19:11,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/c234e6c2a19c4223830f8ce5621ef419 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/c234e6c2a19c4223830f8ce5621ef419 2024-12-06T10:19:11,192 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/c234e6c2a19c4223830f8ce5621ef419, entries=150, sequenceid=294, filesize=12.0 K 2024-12-06T10:19:11,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/a8a9db3bff954ae9a20104fdc881bab4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a8a9db3bff954ae9a20104fdc881bab4 2024-12-06T10:19:11,195 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a8a9db3bff954ae9a20104fdc881bab4, entries=150, sequenceid=294, filesize=12.0 K 2024-12-06T10:19:11,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/3ffd5073e7dc4242b1780dc0d51c7769 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3ffd5073e7dc4242b1780dc0d51c7769 2024-12-06T10:19:11,199 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3ffd5073e7dc4242b1780dc0d51c7769, entries=150, sequenceid=294, filesize=12.0 K 2024-12-06T10:19:11,208 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fa004d65282160f629f3eb2a5c9dca1d in 957ms, sequenceid=294, compaction requested=false 2024-12-06T10:19:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-06T10:19:11,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-06T10:19:11,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-06T10:19:11,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8750 sec 2024-12-06T10:19:11,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.8790 sec 2024-12-06T10:19:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T10:19:11,437 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-06T10:19:11,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-06T10:19:11,439 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-06T10:19:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T10:19:11,440 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:11,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:11,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T10:19:11,592 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-06T10:19:11,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:11,593 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:19:11,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:11,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/0a169636d1654da89253f3d6cea7546a is 50, key is test_row_0/A:col10/1733480350534/Put/seqid=0 2024-12-06T10:19:11,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is 
added to blk_1073742424_1600 (size=12301) 2024-12-06T10:19:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:11,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480411697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480411702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480411703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480411703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T10:19:11,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480411804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480411809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480411809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:11,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480411810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,002 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/0a169636d1654da89253f3d6cea7546a 2024-12-06T10:19:12,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/485b6fbf06984a1692066bfc4114cb75 is 50, key is test_row_0/B:col10/1733480350534/Put/seqid=0 2024-12-06T10:19:12,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480412009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742425_1601 (size=12301) 2024-12-06T10:19:12,016 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/485b6fbf06984a1692066bfc4114cb75 2024-12-06T10:19:12,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480412019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480412019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480412019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/458441621d774baeb80a120bdeb22672 is 50, key is test_row_0/C:col10/1733480350534/Put/seqid=0 2024-12-06T10:19:12,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742426_1602 (size=12301) 2024-12-06T10:19:12,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T10:19:12,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480412316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480412324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480412325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480412326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,428 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/458441621d774baeb80a120bdeb22672 2024-12-06T10:19:12,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/0a169636d1654da89253f3d6cea7546a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/0a169636d1654da89253f3d6cea7546a 2024-12-06T10:19:12,436 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/0a169636d1654da89253f3d6cea7546a, entries=150, sequenceid=309, filesize=12.0 K 2024-12-06T10:19:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/485b6fbf06984a1692066bfc4114cb75 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/485b6fbf06984a1692066bfc4114cb75 2024-12-06T10:19:12,439 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/485b6fbf06984a1692066bfc4114cb75, entries=150, sequenceid=309, filesize=12.0 K 2024-12-06T10:19:12,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/458441621d774baeb80a120bdeb22672 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/458441621d774baeb80a120bdeb22672 2024-12-06T10:19:12,442 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/458441621d774baeb80a120bdeb22672, entries=150, sequenceid=309, filesize=12.0 K 2024-12-06T10:19:12,443 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fa004d65282160f629f3eb2a5c9dca1d in 850ms, sequenceid=309, compaction requested=true 2024-12-06T10:19:12,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:12,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:12,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-06T10:19:12,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-06T10:19:12,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-06T10:19:12,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0040 sec 2024-12-06T10:19:12,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.0080 sec 2024-12-06T10:19:12,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T10:19:12,543 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-06T10:19:12,544 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:12,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-06T10:19:12,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T10:19:12,546 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:12,547 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:12,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:12,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T10:19:12,698 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-06T10:19:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:12,699 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:19:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:12,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:12,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:12,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:12,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/bab7cac8b9344a678e3bfe8e6db12471 is 50, key is test_row_0/A:col10/1733480351702/Put/seqid=0 2024-12-06T10:19:12,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37093 is added to blk_1073742427_1603 (size=12301) 2024-12-06T10:19:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:12,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing 2024-12-06T10:19:12,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T10:19:12,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480412871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480412878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480412879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480412879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480412980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480412985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480412985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:12,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:12,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480412986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:13,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-06T10:19:13,111 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/bab7cac8b9344a678e3bfe8e6db12471 2024-12-06T10:19:13,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/46ddf427efcc4d2a9694d35d5de56bb8 is 50, key is test_row_0/B:col10/1733480351702/Put/seqid=0 2024-12-06T10:19:13,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742428_1604 (size=12301) 2024-12-06T10:19:13,125 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/46ddf427efcc4d2a9694d35d5de56bb8 2024-12-06T10:19:13,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/2bf7fc4f543d4a5ba932681441395edd is 50, key is test_row_0/C:col10/1733480351702/Put/seqid=0 2024-12-06T10:19:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T10:19:13,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742429_1605 (size=12301) 2024-12-06T10:19:13,150 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/2bf7fc4f543d4a5ba932681441395edd 2024-12-06T10:19:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/bab7cac8b9344a678e3bfe8e6db12471 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/bab7cac8b9344a678e3bfe8e6db12471 2024-12-06T10:19:13,157 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/bab7cac8b9344a678e3bfe8e6db12471, entries=150, 
sequenceid=333, filesize=12.0 K 2024-12-06T10:19:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/46ddf427efcc4d2a9694d35d5de56bb8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/46ddf427efcc4d2a9694d35d5de56bb8 2024-12-06T10:19:13,161 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/46ddf427efcc4d2a9694d35d5de56bb8, entries=150, sequenceid=333, filesize=12.0 K 2024-12-06T10:19:13,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/2bf7fc4f543d4a5ba932681441395edd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/2bf7fc4f543d4a5ba932681441395edd 2024-12-06T10:19:13,165 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/2bf7fc4f543d4a5ba932681441395edd, entries=150, sequenceid=333, filesize=12.0 K 2024-12-06T10:19:13,168 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fa004d65282160f629f3eb2a5c9dca1d in 468ms, sequenceid=333, compaction requested=true 2024-12-06T10:19:13,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:13,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:13,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147
2024-12-06T10:19:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=147
2024-12-06T10:19:13,170 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146
2024-12-06T10:19:13,170 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 622 msec
2024-12-06T10:19:13,171 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 626 msec
2024-12-06T10:19:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d
2024-12-06T10:19:13,193 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-06T10:19:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A
2024-12-06T10:19:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B
2024-12-06T10:19:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C
2024-12-06T10:19:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:13,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/28cdbda66bb14e2a939e079267c710fe is 50, key is test_row_0/A:col10/1733480353192/Put/seqid=0
2024-12-06T10:19:13,203 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742430_1606 (size=14741)
2024-12-06T10:19:13,249 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480413239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,249 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480413244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,250 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480413244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,255 WARN  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480413245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,357 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480413350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,357 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480413350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,357 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480413351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,362 WARN  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480413356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,570 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480413558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,570 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480413565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,571 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480413566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,571 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480413566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,603 INFO  [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/28cdbda66bb14e2a939e079267c710fe
2024-12-06T10:19:13,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/38f9e5cfaac24627b1ced296c665101b is 50, key is test_row_0/B:col10/1733480353192/Put/seqid=0
2024-12-06T10:19:13,614 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742431_1607 (size=12301)
2024-12-06T10:19:13,614 INFO  [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/38f9e5cfaac24627b1ced296c665101b
2024-12-06T10:19:13,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f92e151115324877ab02e61c0337a451 is 50, key is test_row_0/C:col10/1733480353192/Put/seqid=0
2024-12-06T10:19:13,624 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742432_1608 (size=12301)
2024-12-06T10:19:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146
2024-12-06T10:19:13,648 INFO  [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed
2024-12-06T10:19:13,650 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-06T10:19:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees
2024-12-06T10:19:13,651 INFO  [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-06T10:19:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148
2024-12-06T10:19:13,651 INFO  [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-06T10:19:13,652 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-06T10:19:13,687 DEBUG [Thread-2351 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:61610
2024-12-06T10:19:13,687 DEBUG [Thread-2351 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:19:13,690 DEBUG [Thread-2357 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:61610
2024-12-06T10:19:13,690 DEBUG [Thread-2357 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:19:13,691 DEBUG [Thread-2359 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:61610
2024-12-06T10:19:13,691 DEBUG [Thread-2359 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:19:13,694 DEBUG [Thread-2353 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:61610
2024-12-06T10:19:13,694 DEBUG [Thread-2353 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:19:13,698 DEBUG [Thread-2355 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:61610
2024-12-06T10:19:13,698 DEBUG [Thread-2355 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:19:13,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148
2024-12-06T10:19:13,803 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149
2024-12-06T10:19:13,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:13,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing
2024-12-06T10:19:13,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:13,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149
java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:19:13,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149
java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:19:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=149
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:19:13,871 WARN  [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,872 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51434 deadline: 1733480413871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51492 deadline: 1733480413871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,874 WARN  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51466 deadline: 1733480413873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,874 WARN  [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:13,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51460 deadline: 1733480413874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148
2024-12-06T10:19:13,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:19:13,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149
2024-12-06T10:19:13,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:13,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. as already flushing
2024-12-06T10:19:13,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:13,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149
java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:19:13,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149
java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:19:13,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=149
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:19:14,025 INFO  [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f92e151115324877ab02e61c0337a451
2024-12-06T10:19:14,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/28cdbda66bb14e2a939e079267c710fe as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/28cdbda66bb14e2a939e079267c710fe
2024-12-06T10:19:14,031 INFO  [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/28cdbda66bb14e2a939e079267c710fe, entries=200, sequenceid=346, filesize=14.4 K
2024-12-06T10:19:14,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/38f9e5cfaac24627b1ced296c665101b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/38f9e5cfaac24627b1ced296c665101b
2024-12-06T10:19:14,034 INFO  [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/38f9e5cfaac24627b1ced296c665101b, entries=150, sequenceid=346, filesize=12.0 K
2024-12-06T10:19:14,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/f92e151115324877ab02e61c0337a451 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f92e151115324877ab02e61c0337a451
2024-12-06T10:19:14,037 INFO  [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f92e151115324877ab02e61c0337a451, entries=150, sequenceid=346, filesize=12.0 K
2024-12-06T10:19:14,038 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for fa004d65282160f629f3eb2a5c9dca1d in 845ms, sequenceid=346, compaction requested=true
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d:
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:A, priority=-2147483648, current under compaction store size is 1
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:B, priority=-2147483648, current under compaction store size is 2
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fa004d65282160f629f3eb2a5c9dca1d:C, priority=-2147483648, current under compaction store size is 3
2024-12-06T10:19:14,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-06T10:19:14,038 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-12-06T10:19:14,038 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-12-06T10:19:14,039 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62187 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-12-06T10:19:14,039 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 64627 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-12-06T10:19:14,039 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/B is initiating minor compaction (all files)
2024-12-06T10:19:14,039 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/A is initiating minor compaction (all files)
2024-12-06T10:19:14,039 INFO  [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/B in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:14,039 INFO  [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/A in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:14,039 INFO  [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/e3c037045dfd4b5b8a6f7029565c6f2d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/c234e6c2a19c4223830f8ce5621ef419, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/0a169636d1654da89253f3d6cea7546a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/bab7cac8b9344a678e3bfe8e6db12471, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/28cdbda66bb14e2a939e079267c710fe] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=63.1 K
2024-12-06T10:19:14,039 INFO  [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/7039f35d1a444aa19b21d62acc5ef5b0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a8a9db3bff954ae9a20104fdc881bab4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/485b6fbf06984a1692066bfc4114cb75, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/46ddf427efcc4d2a9694d35d5de56bb8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/38f9e5cfaac24627b1ced296c665101b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=60.7 K
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3c037045dfd4b5b8a6f7029565c6f2d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733480348749
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 7039f35d1a444aa19b21d62acc5ef5b0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733480348749
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c234e6c2a19c4223830f8ce5621ef419, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733480349389
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a8a9db3bff954ae9a20104fdc881bab4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733480349389
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a169636d1654da89253f3d6cea7546a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733480350534
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 485b6fbf06984a1692066bfc4114cb75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733480350534
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab7cac8b9344a678e3bfe8e6db12471, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733480351688
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 46ddf427efcc4d2a9694d35d5de56bb8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733480351688
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28cdbda66bb14e2a939e079267c710fe, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733480352825
2024-12-06T10:19:14,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 38f9e5cfaac24627b1ced296c665101b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733480352825
2024-12-06T10:19:14,048 INFO  [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#B#compaction#521 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:19:14,048 INFO  [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#A#compaction#522 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:19:14,048 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/fc5cc7a8a15d47f0b3c254a7495f5253 is 50, key is test_row_0/B:col10/1733480353192/Put/seqid=0
2024-12-06T10:19:14,048 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/cb804343d2c74b999e6546680b849117 is 50, key is test_row_0/A:col10/1733480353192/Put/seqid=0
2024-12-06T10:19:14,055 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742434_1610 (size=13153)
2024-12-06T10:19:14,055 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742433_1609 (size=13153)
2024-12-06T10:19:14,108 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743
2024-12-06T10:19:14,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149
2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.
2024-12-06T10:19:14,109 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:14,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/3a1b79aa735a47d38969df3aef684d7f is 50, key is test_row_0/A:col10/1733480353242/Put/seqid=0 2024-12-06T10:19:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742435_1611 (size=12301) 2024-12-06T10:19:14,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T10:19:14,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:14,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
as already flushing 2024-12-06T10:19:14,374 DEBUG [Thread-2346 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:61610 2024-12-06T10:19:14,374 DEBUG [Thread-2346 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:14,378 DEBUG [Thread-2344 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:61610 2024-12-06T10:19:14,378 DEBUG [Thread-2348 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:61610 2024-12-06T10:19:14,378 DEBUG [Thread-2344 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:14,378 DEBUG [Thread-2348 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:14,379 DEBUG [Thread-2342 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:61610 2024-12-06T10:19:14,379 DEBUG [Thread-2342 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:14,459 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/fc5cc7a8a15d47f0b3c254a7495f5253 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/fc5cc7a8a15d47f0b3c254a7495f5253 2024-12-06T10:19:14,459 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/cb804343d2c74b999e6546680b849117 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/cb804343d2c74b999e6546680b849117 2024-12-06T10:19:14,462 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/A of fa004d65282160f629f3eb2a5c9dca1d into cb804343d2c74b999e6546680b849117(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:14,462 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/B of fa004d65282160f629f3eb2a5c9dca1d into fc5cc7a8a15d47f0b3c254a7495f5253(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
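[Editor's note] The flush and compaction records above first write new HFiles under the region's .tmp directory and then log "Committing .../.tmp/<family>/<file> as .../<family>/<file>". The sketch below shows that general write-to-temp-then-rename commit pattern using the Hadoop FileSystem API; it is a simplified illustration, not HRegionFileSystem itself, and the paths in main() are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the ".tmp then rename" commit pattern visible in the log above.
public final class TmpCommitSketch {

  // Move a finished file from the region's .tmp area into the store directory.
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    if (!fs.exists(storeDir) && !fs.mkdirs(storeDir)) {
      throw new IOException("Could not create " + storeDir);
    }
    Path dst = new Path(storeDir, tmpFile.getName());
    // rename() stays on the same filesystem, so the move is a metadata operation rather than a copy.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to move " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder paths; the real region layout is .../<table>/<region>/.tmp/<family>/<file>.
    Path tmp = new Path("/user/jenkins/region/.tmp/A/hfile-0001");
    Path store = new Path("/user/jenkins/region/A");
    System.out.println("Committed to " + commit(fs, tmp, store));
  }
}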
2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:14,462 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/A, priority=11, startTime=1733480354038; duration=0sec 2024-12-06T10:19:14,462 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/B, priority=11, startTime=1733480354038; duration=0sec 2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:A 2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:B 2024-12-06T10:19:14,462 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-06T10:19:14,463 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62187 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-06T10:19:14,463 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): fa004d65282160f629f3eb2a5c9dca1d/C is initiating minor compaction (all files) 2024-12-06T10:19:14,463 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fa004d65282160f629f3eb2a5c9dca1d/C in TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:14,463 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5bbefa46033742a6a741e85bafb7005e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3ffd5073e7dc4242b1780dc0d51c7769, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/458441621d774baeb80a120bdeb22672, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/2bf7fc4f543d4a5ba932681441395edd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f92e151115324877ab02e61c0337a451] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp, totalSize=60.7 K 2024-12-06T10:19:14,464 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5bbefa46033742a6a741e85bafb7005e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733480348749 2024-12-06T10:19:14,464 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ffd5073e7dc4242b1780dc0d51c7769, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733480349389 2024-12-06T10:19:14,464 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 458441621d774baeb80a120bdeb22672, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733480350534 2024-12-06T10:19:14,464 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bf7fc4f543d4a5ba932681441395edd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733480351688 2024-12-06T10:19:14,464 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f92e151115324877ab02e61c0337a451, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733480352825 2024-12-06T10:19:14,471 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fa004d65282160f629f3eb2a5c9dca1d#C#compaction#524 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:14,472 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/39163fdf209040a095995439c4797b8b is 50, key is test_row_0/C:col10/1733480353192/Put/seqid=0 2024-12-06T10:19:14,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742436_1612 (size=13153) 2024-12-06T10:19:14,479 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/39163fdf209040a095995439c4797b8b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/39163fdf209040a095995439c4797b8b 2024-12-06T10:19:14,483 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fa004d65282160f629f3eb2a5c9dca1d/C of fa004d65282160f629f3eb2a5c9dca1d into 39163fdf209040a095995439c4797b8b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:14,483 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:14,483 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d., storeName=fa004d65282160f629f3eb2a5c9dca1d/C, priority=11, startTime=1733480354038; duration=0sec 2024-12-06T10:19:14,483 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:14,483 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fa004d65282160f629f3eb2a5c9dca1d:C 2024-12-06T10:19:14,516 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/3a1b79aa735a47d38969df3aef684d7f 2024-12-06T10:19:14,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/b523b28860744c88843821548f54336e is 50, key is test_row_0/B:col10/1733480353242/Put/seqid=0 2024-12-06T10:19:14,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742437_1613 (size=12301) 2024-12-06T10:19:14,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=148 2024-12-06T10:19:14,768 DEBUG [Thread-2340 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:61610 2024-12-06T10:19:14,768 DEBUG [Thread-2340 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:14,925 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/b523b28860744c88843821548f54336e 2024-12-06T10:19:14,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/32be049d9fbb4d918db3c42dc5b21a35 is 50, key is test_row_0/C:col10/1733480353242/Put/seqid=0 2024-12-06T10:19:14,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742438_1614 (size=12301) 2024-12-06T10:19:15,334 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/32be049d9fbb4d918db3c42dc5b21a35 2024-12-06T10:19:15,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/3a1b79aa735a47d38969df3aef684d7f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/3a1b79aa735a47d38969df3aef684d7f 2024-12-06T10:19:15,341 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/3a1b79aa735a47d38969df3aef684d7f, entries=150, sequenceid=370, filesize=12.0 K 2024-12-06T10:19:15,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/b523b28860744c88843821548f54336e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/b523b28860744c88843821548f54336e 2024-12-06T10:19:15,344 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/b523b28860744c88843821548f54336e, 
entries=150, sequenceid=370, filesize=12.0 K 2024-12-06T10:19:15,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/32be049d9fbb4d918db3c42dc5b21a35 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/32be049d9fbb4d918db3c42dc5b21a35 2024-12-06T10:19:15,349 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/32be049d9fbb4d918db3c42dc5b21a35, entries=150, sequenceid=370, filesize=12.0 K 2024-12-06T10:19:15,350 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=33.54 KB/34350 for fa004d65282160f629f3eb2a5c9dca1d in 1241ms, sequenceid=370, compaction requested=false 2024-12-06T10:19:15,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:15,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:15,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-06T10:19:15,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-06T10:19:15,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-06T10:19:15,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6990 sec 2024-12-06T10:19:15,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.7020 sec 2024-12-06T10:19:15,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T10:19:15,755 INFO [Thread-2350 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-06T10:19:15,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-06T10:19:15,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2447 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7341 rows 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2438 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7314 rows 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2445 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7335 rows 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2469 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7407 rows 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2449 2024-12-06T10:19:15,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7347 rows 2024-12-06T10:19:15,756 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:19:15,756 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:61610 2024-12-06T10:19:15,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:15,761 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T10:19:15,761 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T10:19:15,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:15,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T10:19:15,764 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480355764"}]},"ts":"1733480355764"} 2024-12-06T10:19:15,765 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T10:19:15,767 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T10:19:15,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:19:15,769 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, UNASSIGN}] 2024-12-06T10:19:15,769 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, UNASSIGN 2024-12-06T10:19:15,770 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=fa004d65282160f629f3eb2a5c9dca1d, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:15,771 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:19:15,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:19:15,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T10:19:15,922 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:15,922 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:15,922 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:19:15,922 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing fa004d65282160f629f3eb2a5c9dca1d, disabling compactions & flushes 2024-12-06T10:19:15,922 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:15,922 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:15,922 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. after waiting 0 ms 2024-12-06T10:19:15,922 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 
2024-12-06T10:19:15,923 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing fa004d65282160f629f3eb2a5c9dca1d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T10:19:15,923 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=A 2024-12-06T10:19:15,923 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:15,923 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=B 2024-12-06T10:19:15,923 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:15,923 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fa004d65282160f629f3eb2a5c9dca1d, store=C 2024-12-06T10:19:15,923 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:15,926 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/91ead72c81cb48d58f9be02a7a304adf is 50, key is test_row_0/A:col10/1733480354767/Put/seqid=0 2024-12-06T10:19:15,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742439_1615 (size=12301) 2024-12-06T10:19:16,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T10:19:16,330 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/91ead72c81cb48d58f9be02a7a304adf 2024-12-06T10:19:16,335 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/913a4f1b4a514e57b7e94e5379237553 is 50, key is test_row_0/B:col10/1733480354767/Put/seqid=0 2024-12-06T10:19:16,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742440_1616 (size=12301) 2024-12-06T10:19:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T10:19:16,739 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 
{event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/913a4f1b4a514e57b7e94e5379237553 2024-12-06T10:19:16,744 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/4be468c810a4401b914e0d0a8d16da2d is 50, key is test_row_0/C:col10/1733480354767/Put/seqid=0 2024-12-06T10:19:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742441_1617 (size=12301) 2024-12-06T10:19:16,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T10:19:17,151 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/4be468c810a4401b914e0d0a8d16da2d 2024-12-06T10:19:17,155 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/A/91ead72c81cb48d58f9be02a7a304adf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/91ead72c81cb48d58f9be02a7a304adf 2024-12-06T10:19:17,157 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/91ead72c81cb48d58f9be02a7a304adf, entries=150, sequenceid=381, filesize=12.0 K 2024-12-06T10:19:17,158 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/B/913a4f1b4a514e57b7e94e5379237553 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/913a4f1b4a514e57b7e94e5379237553 2024-12-06T10:19:17,160 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/913a4f1b4a514e57b7e94e5379237553, entries=150, sequenceid=381, filesize=12.0 K 2024-12-06T10:19:17,161 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/.tmp/C/4be468c810a4401b914e0d0a8d16da2d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/4be468c810a4401b914e0d0a8d16da2d 2024-12-06T10:19:17,163 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/4be468c810a4401b914e0d0a8d16da2d, entries=150, sequenceid=381, filesize=12.0 K 2024-12-06T10:19:17,164 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for fa004d65282160f629f3eb2a5c9dca1d in 1242ms, sequenceid=381, compaction requested=true 2024-12-06T10:19:17,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/213be9078a724502b0543613c355e47e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9e4917827bd84af7bcc3bfe3ef1af491, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/694462eab5524b97b36c8014089bcb29, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1b66dbc97bb641bb9413abf67d2ed65d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/4a60b7efd80744a3b21e24a48bcfb2fc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/07d426ba1bd44b2f9cd1e3185c956a7e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/92bff4133e2c4828924453d58bbaa7c5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/6eab958228124930a5383b219bfe0321, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/33edb85437004723baa4de97de365b34, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/876c3e48ec8a49e29f2c5991165d78e2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1d97ff855e6a4cb0bbcdaadae6aedccd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/58a84f83239949f3972a01b499ed0c8a, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9d28ebac95f64c3998f275f2d64ded22, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/60a2d5a4cce74fbba3fab7f61d0a27ec, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/156cba7e40574305a18c45c0f18f3b0c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/adc38d33d639496a80e29e668623af2f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/dbc05e765bd04f158a3d1876604cc91e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/31f422ae20374a96a94b697d7bb76e4c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/f2c4d3cf011946ed89b46f75da3e70f8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/e3c037045dfd4b5b8a6f7029565c6f2d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/ccc406ad963f4dca98898037e8a8b220, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/c234e6c2a19c4223830f8ce5621ef419, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/0a169636d1654da89253f3d6cea7546a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/bab7cac8b9344a678e3bfe8e6db12471, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/28cdbda66bb14e2a939e079267c710fe] to archive 2024-12-06T10:19:17,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
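[Editor's note] During the region close above, HFileArchiver moves each compacted store file from .../data/default/<table>/<region>/<family>/ to the matching location under .../archive/data/..., preserving the relative path. The sketch below illustrates that path-mirroring move with the Hadoop FileSystem API; it is not the real HFileArchiver, and the root directory and file name are illustrative stand-ins.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of archiving a compacted store file by mirroring its path under an archive root.
public final class ArchiveSketch {

  // Map .../data/<relative> to .../archive/data/<relative> and move the file there.
  static Path archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    String dataPrefix = rootDir.toString() + "/data/";
    String file = storeFile.toString();
    if (!file.startsWith(dataPrefix)) {
      throw new IllegalArgumentException(storeFile + " is not under " + dataPrefix);
    }
    String relative = file.substring(dataPrefix.length());
    Path dst = new Path(rootDir, "archive/data/" + relative);
    fs.mkdirs(dst.getParent());
    if (!fs.rename(storeFile, dst)) {
      throw new IOException("Failed to archive " + storeFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Roots and file name are stand-ins for the test-data paths in the log.
    Path root = new Path("/user/jenkins/test-data/run-1");
    Path storeFile = new Path(root, "data/default/TestAcidGuarantees/region/A/hfile-0001");
    System.out.println("Archived to " + archive(fs, root, storeFile));
  }
}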
2024-12-06T10:19:17,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/213be9078a724502b0543613c355e47e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/213be9078a724502b0543613c355e47e 2024-12-06T10:19:17,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9e4917827bd84af7bcc3bfe3ef1af491 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9e4917827bd84af7bcc3bfe3ef1af491 2024-12-06T10:19:17,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/694462eab5524b97b36c8014089bcb29 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/694462eab5524b97b36c8014089bcb29 2024-12-06T10:19:17,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1b66dbc97bb641bb9413abf67d2ed65d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1b66dbc97bb641bb9413abf67d2ed65d 2024-12-06T10:19:17,169 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/4a60b7efd80744a3b21e24a48bcfb2fc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/4a60b7efd80744a3b21e24a48bcfb2fc 2024-12-06T10:19:17,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/07d426ba1bd44b2f9cd1e3185c956a7e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/07d426ba1bd44b2f9cd1e3185c956a7e 2024-12-06T10:19:17,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/92bff4133e2c4828924453d58bbaa7c5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/92bff4133e2c4828924453d58bbaa7c5 2024-12-06T10:19:17,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/6eab958228124930a5383b219bfe0321 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/6eab958228124930a5383b219bfe0321 2024-12-06T10:19:17,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/33edb85437004723baa4de97de365b34 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/33edb85437004723baa4de97de365b34 2024-12-06T10:19:17,173 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/876c3e48ec8a49e29f2c5991165d78e2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/876c3e48ec8a49e29f2c5991165d78e2 2024-12-06T10:19:17,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1d97ff855e6a4cb0bbcdaadae6aedccd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/1d97ff855e6a4cb0bbcdaadae6aedccd 2024-12-06T10:19:17,175 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/58a84f83239949f3972a01b499ed0c8a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/58a84f83239949f3972a01b499ed0c8a 2024-12-06T10:19:17,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9d28ebac95f64c3998f275f2d64ded22 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/9d28ebac95f64c3998f275f2d64ded22 2024-12-06T10:19:17,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/60a2d5a4cce74fbba3fab7f61d0a27ec to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/60a2d5a4cce74fbba3fab7f61d0a27ec 2024-12-06T10:19:17,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/156cba7e40574305a18c45c0f18f3b0c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/156cba7e40574305a18c45c0f18f3b0c 2024-12-06T10:19:17,178 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/adc38d33d639496a80e29e668623af2f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/adc38d33d639496a80e29e668623af2f 2024-12-06T10:19:17,179 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/dbc05e765bd04f158a3d1876604cc91e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/dbc05e765bd04f158a3d1876604cc91e 2024-12-06T10:19:17,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/31f422ae20374a96a94b697d7bb76e4c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/31f422ae20374a96a94b697d7bb76e4c 2024-12-06T10:19:17,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/f2c4d3cf011946ed89b46f75da3e70f8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/f2c4d3cf011946ed89b46f75da3e70f8 2024-12-06T10:19:17,182 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/e3c037045dfd4b5b8a6f7029565c6f2d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/e3c037045dfd4b5b8a6f7029565c6f2d 2024-12-06T10:19:17,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/ccc406ad963f4dca98898037e8a8b220 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/ccc406ad963f4dca98898037e8a8b220 2024-12-06T10:19:17,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/c234e6c2a19c4223830f8ce5621ef419 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/c234e6c2a19c4223830f8ce5621ef419 2024-12-06T10:19:17,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/0a169636d1654da89253f3d6cea7546a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/0a169636d1654da89253f3d6cea7546a 2024-12-06T10:19:17,185 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/bab7cac8b9344a678e3bfe8e6db12471 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/bab7cac8b9344a678e3bfe8e6db12471 2024-12-06T10:19:17,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/28cdbda66bb14e2a939e079267c710fe to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/28cdbda66bb14e2a939e079267c710fe 2024-12-06T10:19:17,187 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/851a20bbe2d044b582a0618bd4e1937d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/dbb6d000bed740afb91e9e97dd22c13e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/651f85b097b94b7c99329e5bbaddbb17, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/72db4305aa1d4756a1d21ef1833dbd1e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/ed07336465164338b0544d78e058876e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/92b2b77b87cb4b248d55da41c331e1d3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/e80c848838ad4af284b6e1d53a60a69a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0d3c0a9a59214a299be1b5d876c6861b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/18692387529648b89a4897f4e47b955c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/d76ac71130cd4ff18801e32e486c9c2b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0a7699cd9c98473383f54b9b57d28ddf, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a5d29ea9fdc043519bc0abe811215b16, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8892a451950b4217ad3773bb3d40fd16, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/c10a8b2927074400bb1373e1fb053cea, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/096b06f5d8d147edaab606543bca35b6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/36232e0d12b24de6bb42306212b283ab, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8b60835b913e49c98cf75cf2db1e996c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/bdaed48f5fd24b7fb5cda1797d639af8, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/7039f35d1a444aa19b21d62acc5ef5b0, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/6db3f4694bcb4231924e1212a8d985e8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a8a9db3bff954ae9a20104fdc881bab4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/485b6fbf06984a1692066bfc4114cb75, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/46ddf427efcc4d2a9694d35d5de56bb8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/38f9e5cfaac24627b1ced296c665101b] to archive 2024-12-06T10:19:17,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:19:17,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01 2024-12-06T10:19:17,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/851a20bbe2d044b582a0618bd4e1937d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/851a20bbe2d044b582a0618bd4e1937d 2024-12-06T10:19:17,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/dbb6d000bed740afb91e9e97dd22c13e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/dbb6d000bed740afb91e9e97dd22c13e 2024-12-06T10:19:17,191 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/651f85b097b94b7c99329e5bbaddbb17 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/651f85b097b94b7c99329e5bbaddbb17 2024-12-06T10:19:17,192 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/72db4305aa1d4756a1d21ef1833dbd1e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/72db4305aa1d4756a1d21ef1833dbd1e 2024-12-06T10:19:17,192 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/ed07336465164338b0544d78e058876e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/ed07336465164338b0544d78e058876e 2024-12-06T10:19:17,193 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/92b2b77b87cb4b248d55da41c331e1d3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/92b2b77b87cb4b248d55da41c331e1d3 2024-12-06T10:19:17,194 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/e80c848838ad4af284b6e1d53a60a69a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/e80c848838ad4af284b6e1d53a60a69a 2024-12-06T10:19:17,195 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0d3c0a9a59214a299be1b5d876c6861b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0d3c0a9a59214a299be1b5d876c6861b 2024-12-06T10:19:17,196 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/18692387529648b89a4897f4e47b955c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/18692387529648b89a4897f4e47b955c 2024-12-06T10:19:17,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/d76ac71130cd4ff18801e32e486c9c2b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/d76ac71130cd4ff18801e32e486c9c2b 2024-12-06T10:19:17,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0a7699cd9c98473383f54b9b57d28ddf to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/0a7699cd9c98473383f54b9b57d28ddf 2024-12-06T10:19:17,198 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a5d29ea9fdc043519bc0abe811215b16 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a5d29ea9fdc043519bc0abe811215b16 2024-12-06T10:19:17,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8892a451950b4217ad3773bb3d40fd16 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8892a451950b4217ad3773bb3d40fd16 2024-12-06T10:19:17,200 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/c10a8b2927074400bb1373e1fb053cea to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/c10a8b2927074400bb1373e1fb053cea 2024-12-06T10:19:17,200 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/096b06f5d8d147edaab606543bca35b6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/096b06f5d8d147edaab606543bca35b6 2024-12-06T10:19:17,201 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/36232e0d12b24de6bb42306212b283ab to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/36232e0d12b24de6bb42306212b283ab 2024-12-06T10:19:17,202 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8b60835b913e49c98cf75cf2db1e996c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/8b60835b913e49c98cf75cf2db1e996c 2024-12-06T10:19:17,202 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/bdaed48f5fd24b7fb5cda1797d639af8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/bdaed48f5fd24b7fb5cda1797d639af8 2024-12-06T10:19:17,203 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/7039f35d1a444aa19b21d62acc5ef5b0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/7039f35d1a444aa19b21d62acc5ef5b0 2024-12-06T10:19:17,204 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/6db3f4694bcb4231924e1212a8d985e8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/6db3f4694bcb4231924e1212a8d985e8 2024-12-06T10:19:17,204 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a8a9db3bff954ae9a20104fdc881bab4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/a8a9db3bff954ae9a20104fdc881bab4 2024-12-06T10:19:17,205 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/485b6fbf06984a1692066bfc4114cb75 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/485b6fbf06984a1692066bfc4114cb75 2024-12-06T10:19:17,206 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/46ddf427efcc4d2a9694d35d5de56bb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/46ddf427efcc4d2a9694d35d5de56bb8 2024-12-06T10:19:17,207 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/38f9e5cfaac24627b1ced296c665101b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/38f9e5cfaac24627b1ced296c665101b 2024-12-06T10:19:17,208 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/72cd60b0612047c0b4a9999b6c8c5dde, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/687c41a39fc04694acd44ef97d79e0d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1e435e337b394cc995635d67f8a264c1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3821dc29e34844c7a664312b9be0455a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f730a319a48e4ea1a3a714e975328190, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3508611c5da44ec29d34d92c6e30187c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1bb19bc19e4943a1845c2f044b9b628b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/aeaab6fa81604ee4bc7777ccc75d3a24, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/967785be353b48d0b1a0ff9c1877fdf3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/b634e0edd5c447ac8f91e63cd256eac5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ba0d412334824952b83ab1759fd80f1d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/48b76e4bc7fe4f638a7dc12b1d73766f, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/6485528518224d73b1fbddc371c4856b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ab87017181ea48bda380b3ab3e499571, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f5fe7ff58aa48d4a40c068f838492ca, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f077d6017e764d4997eabb08d666a9d7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f86e39071fc4108bc895f65cbc1498e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/62d6a3bcf514484787d984efc0c74d09, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5f10af9c04464dea9142e8b896636dfa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5bbefa46033742a6a741e85bafb7005e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ff4e0554cb964672b797c1794dba395d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3ffd5073e7dc4242b1780dc0d51c7769, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/458441621d774baeb80a120bdeb22672, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/2bf7fc4f543d4a5ba932681441395edd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f92e151115324877ab02e61c0337a451] to archive 2024-12-06T10:19:17,208 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
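The store-close sequence above repeats one mechanical step many times: each compacted store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the same relative location under .../archive/data/default/... . The short Java sketch below reproduces just that path rewrite for illustration; the class and method names are made up for the example and are not the HBase HFileArchiver API.

// Illustrative sketch only (hypothetical names, not HBase's HFileArchiver code):
// reproduces the path rewrite visible in the archiver log entries above, where a
// store file under .../data/default/<table>/<region>/<cf>/ is mirrored under
// .../archive/data/default/<table>/<region>/<cf>/.
public final class ArchivePathSketch {

    /** Rewrites the first "/data/" segment of a store-file path to "/archive/data/". */
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("not under a /data/ directory: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive/data/"
            + storeFilePath.substring(idx + "/data/".length());
    }

    public static void main(String[] args) {
        // One of the B-family files from the log; the printed result matches the
        // archive location that backup.HFileArchiver(596) reports for it.
        String src = "hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4"
            + "/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/080a8dcf21fc4f1b9bee7f866a3b6e01";
        System.out.println(toArchivePath(src));
    }
}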
2024-12-06T10:19:17,209 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/72cd60b0612047c0b4a9999b6c8c5dde to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/72cd60b0612047c0b4a9999b6c8c5dde 2024-12-06T10:19:17,210 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/687c41a39fc04694acd44ef97d79e0d9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/687c41a39fc04694acd44ef97d79e0d9 2024-12-06T10:19:17,211 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1e435e337b394cc995635d67f8a264c1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1e435e337b394cc995635d67f8a264c1 2024-12-06T10:19:17,211 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3821dc29e34844c7a664312b9be0455a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3821dc29e34844c7a664312b9be0455a 2024-12-06T10:19:17,212 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f730a319a48e4ea1a3a714e975328190 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f730a319a48e4ea1a3a714e975328190 2024-12-06T10:19:17,213 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3508611c5da44ec29d34d92c6e30187c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3508611c5da44ec29d34d92c6e30187c 2024-12-06T10:19:17,214 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1bb19bc19e4943a1845c2f044b9b628b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/1bb19bc19e4943a1845c2f044b9b628b 2024-12-06T10:19:17,214 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/aeaab6fa81604ee4bc7777ccc75d3a24 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/aeaab6fa81604ee4bc7777ccc75d3a24 2024-12-06T10:19:17,215 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/967785be353b48d0b1a0ff9c1877fdf3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/967785be353b48d0b1a0ff9c1877fdf3 2024-12-06T10:19:17,216 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/b634e0edd5c447ac8f91e63cd256eac5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/b634e0edd5c447ac8f91e63cd256eac5 2024-12-06T10:19:17,218 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ba0d412334824952b83ab1759fd80f1d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ba0d412334824952b83ab1759fd80f1d 2024-12-06T10:19:17,219 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/48b76e4bc7fe4f638a7dc12b1d73766f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/48b76e4bc7fe4f638a7dc12b1d73766f 2024-12-06T10:19:17,219 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/6485528518224d73b1fbddc371c4856b to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/6485528518224d73b1fbddc371c4856b 2024-12-06T10:19:17,220 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ab87017181ea48bda380b3ab3e499571 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ab87017181ea48bda380b3ab3e499571 2024-12-06T10:19:17,221 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f5fe7ff58aa48d4a40c068f838492ca to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f5fe7ff58aa48d4a40c068f838492ca 2024-12-06T10:19:17,221 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f077d6017e764d4997eabb08d666a9d7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f077d6017e764d4997eabb08d666a9d7 2024-12-06T10:19:17,222 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f86e39071fc4108bc895f65cbc1498e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3f86e39071fc4108bc895f65cbc1498e 2024-12-06T10:19:17,223 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/62d6a3bcf514484787d984efc0c74d09 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/62d6a3bcf514484787d984efc0c74d09 2024-12-06T10:19:17,224 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5f10af9c04464dea9142e8b896636dfa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5f10af9c04464dea9142e8b896636dfa 2024-12-06T10:19:17,224 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5bbefa46033742a6a741e85bafb7005e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/5bbefa46033742a6a741e85bafb7005e 2024-12-06T10:19:17,225 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ff4e0554cb964672b797c1794dba395d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/ff4e0554cb964672b797c1794dba395d 2024-12-06T10:19:17,226 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3ffd5073e7dc4242b1780dc0d51c7769 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/3ffd5073e7dc4242b1780dc0d51c7769 2024-12-06T10:19:17,226 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/458441621d774baeb80a120bdeb22672 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/458441621d774baeb80a120bdeb22672 2024-12-06T10:19:17,227 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/2bf7fc4f543d4a5ba932681441395edd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/2bf7fc4f543d4a5ba932681441395edd 2024-12-06T10:19:17,228 DEBUG [StoreCloser-TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f92e151115324877ab02e61c0337a451 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/f92e151115324877ab02e61c0337a451 2024-12-06T10:19:17,231 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/recovered.edits/384.seqid, newMaxSeqId=384, maxSeqId=1 2024-12-06T10:19:17,232 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d. 2024-12-06T10:19:17,232 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for fa004d65282160f629f3eb2a5c9dca1d: 2024-12-06T10:19:17,233 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:17,233 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=fa004d65282160f629f3eb2a5c9dca1d, regionState=CLOSED 2024-12-06T10:19:17,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-06T10:19:17,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure fa004d65282160f629f3eb2a5c9dca1d, server=552d6a33fa09,33397,1733480204743 in 1.4630 sec 2024-12-06T10:19:17,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-06T10:19:17,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fa004d65282160f629f3eb2a5c9dca1d, UNASSIGN in 1.4660 sec 2024-12-06T10:19:17,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-06T10:19:17,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4680 sec 2024-12-06T10:19:17,238 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480357238"}]},"ts":"1733480357238"} 2024-12-06T10:19:17,239 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T10:19:17,241 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T10:19:17,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4810 sec 2024-12-06T10:19:17,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T10:19:17,867 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-06T10:19:17,867 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T10:19:17,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,869 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-06T10:19:17,869 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,871 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:17,872 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/recovered.edits] 2024-12-06T10:19:17,874 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/3a1b79aa735a47d38969df3aef684d7f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/3a1b79aa735a47d38969df3aef684d7f 2024-12-06T10:19:17,875 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/91ead72c81cb48d58f9be02a7a304adf to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/91ead72c81cb48d58f9be02a7a304adf 2024-12-06T10:19:17,876 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/cb804343d2c74b999e6546680b849117 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/A/cb804343d2c74b999e6546680b849117 2024-12-06T10:19:17,877 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/913a4f1b4a514e57b7e94e5379237553 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/913a4f1b4a514e57b7e94e5379237553 2024-12-06T10:19:17,878 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/b523b28860744c88843821548f54336e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/b523b28860744c88843821548f54336e 2024-12-06T10:19:17,879 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/fc5cc7a8a15d47f0b3c254a7495f5253 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/B/fc5cc7a8a15d47f0b3c254a7495f5253 2024-12-06T10:19:17,881 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/32be049d9fbb4d918db3c42dc5b21a35 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/32be049d9fbb4d918db3c42dc5b21a35 2024-12-06T10:19:17,881 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/39163fdf209040a095995439c4797b8b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/39163fdf209040a095995439c4797b8b 2024-12-06T10:19:17,882 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/4be468c810a4401b914e0d0a8d16da2d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/C/4be468c810a4401b914e0d0a8d16da2d 2024-12-06T10:19:17,884 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/recovered.edits/384.seqid to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d/recovered.edits/384.seqid 2024-12-06T10:19:17,885 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/fa004d65282160f629f3eb2a5c9dca1d 2024-12-06T10:19:17,885 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T10:19:17,887 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,888 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from 
hbase:meta 2024-12-06T10:19:17,890 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T10:19:17,890 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,890 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T10:19:17,891 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733480357890"}]},"ts":"9223372036854775807"} 2024-12-06T10:19:17,892 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T10:19:17,892 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fa004d65282160f629f3eb2a5c9dca1d, NAME => 'TestAcidGuarantees,,1733480331512.fa004d65282160f629f3eb2a5c9dca1d.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T10:19:17,892 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-06T10:19:17,892 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733480357892"}]},"ts":"9223372036854775807"} 2024-12-06T10:19:17,893 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T10:19:17,896 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 29 msec 2024-12-06T10:19:17,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-06T10:19:17,970 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-06T10:19:17,980 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=240 (was 241), OpenFileDescriptor=454 (was 456), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=442 (was 408) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6151 (was 6444) 2024-12-06T10:19:17,988 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=240, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=442, ProcessCount=11, AvailableMemoryMB=6151 2024-12-06T10:19:17,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
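The TableDescriptorChecker WARN just above fires because the test table sets MEMSTORE_FLUSHSIZE to 131072 bytes (128 KB). The fragment below is only a sketch of that kind of sanity check, assuming a 1 MB lower bound purely for illustration; the property name and the 131072 value come from the log, while the threshold and class name do not.

// Sketch only: a sanity check of the kind the WARN above suggests. The threshold
// below is an assumption made for this example; only the property name and the
// 131072-byte value are taken from the log.
public final class FlushSizeCheckSketch {

    static final String FLUSH_SIZE_KEY = "hbase.hregion.memstore.flush.size";
    static final long ASSUMED_MIN_FLUSH_SIZE = 1024L * 1024L; // 1 MB, hypothetical

    static void warnIfTooSmall(long configuredFlushSize) {
        if (configuredFlushSize < ASSUMED_MIN_FLUSH_SIZE) {
            System.out.printf(
                "MEMSTORE_FLUSHSIZE for table descriptor or \"%s\" (%d) is too small, "
                    + "which might cause very frequent flushing.%n",
                FLUSH_SIZE_KEY, configuredFlushSize);
        }
    }

    public static void main(String[] args) {
        warnIfTooSmall(131072L); // the value reported for TestAcidGuarantees
    }
}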
2024-12-06T10:19:17,990 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:19:17,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:17,991 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:19:17,991 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:17,991 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-12-06T10:19:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T10:19:17,992 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:19:17,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742442_1618 (size=963) 2024-12-06T10:19:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T10:19:18,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T10:19:18,398 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4 2024-12-06T10:19:18,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742443_1619 (size=53) 2024-12-06T10:19:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T10:19:18,805 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:19:18,805 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7b4c4e64c91c50413b4c0cd97a01bcb8, disabling compactions & flushes 2024-12-06T10:19:18,805 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:18,805 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:18,805 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. after waiting 0 ms 2024-12-06T10:19:18,805 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:18,805 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
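The create request logged at 10:19:17,990 and echoed by RegionOpenAndInit above describes a table with an ADAPTIVE compacting memstore and three column families A, B and C, each keeping a single version. The sketch below shows how a client could build an equivalent descriptor, assuming the HBase 2.x client API (TableDescriptorBuilder / ColumnFamilyDescriptorBuilder); it illustrates the shape of the request and is not the test's own setup code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata shown in the log's TABLE_ATTRIBUTES.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                    .build());
        }
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.createTable(table.build());
        }
    }
}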
2024-12-06T10:19:18,805 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:18,806 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:19:18,806 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733480358806"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480358806"}]},"ts":"1733480358806"} 2024-12-06T10:19:18,807 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:19:18,807 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:19:18,808 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480358808"}]},"ts":"1733480358808"} 2024-12-06T10:19:18,808 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T10:19:18,813 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, ASSIGN}] 2024-12-06T10:19:18,814 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, ASSIGN 2024-12-06T10:19:18,815 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, ASSIGN; state=OFFLINE, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=false 2024-12-06T10:19:18,965 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:18,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:19:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T10:19:19,118 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:19,120 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
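Once the region has been added to hbase:meta and assigned (pid=156/157 above place 7b4c4e64c91c50413b4c0cd97a01bcb8 on 552d6a33fa09,33397,1733480204743), the same assignment information is visible to any client through a RegionLocator. A minimal sketch, again assuming the standard HBase 2.x client API; the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and its hosting region server,
        // e.g. 7b4c4e64c91c50413b4c0cd97a01bcb8 -> 552d6a33fa09,33397,1733480204743
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}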
2024-12-06T10:19:19,120 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:19:19,121 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,121 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:19:19,121 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,121 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,122 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,123 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:19:19,123 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4c4e64c91c50413b4c0cd97a01bcb8 columnFamilyName A 2024-12-06T10:19:19,123 DEBUG [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:19,124 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(327): Store=7b4c4e64c91c50413b4c0cd97a01bcb8/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:19:19,124 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,125 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:19:19,125 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4c4e64c91c50413b4c0cd97a01bcb8 columnFamilyName B 2024-12-06T10:19:19,125 DEBUG [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:19,125 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(327): Store=7b4c4e64c91c50413b4c0cd97a01bcb8/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:19:19,125 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,126 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:19:19,126 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4c4e64c91c50413b4c0cd97a01bcb8 columnFamilyName C 2024-12-06T10:19:19,126 DEBUG [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:19,126 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(327): Store=7b4c4e64c91c50413b4c0cd97a01bcb8/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:19:19,127 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:19,127 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,127 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,128 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:19:19,129 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:19,131 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:19:19,131 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened 7b4c4e64c91c50413b4c0cd97a01bcb8; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71499081, jitterRate=0.06541933119297028}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:19:19,131 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:19,132 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., pid=157, masterSystemTime=1733480359118 2024-12-06T10:19:19,133 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:19,133 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
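The CompactionConfiguration line printed for each store as the region opens (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, ...) reflects the usual hbase.hstore.compaction.* settings, and the small flush size behind the TableDescriptorChecker warning further down maps to hbase.hregion.memstore.flush.size. A sketch of the keys those numbers come from, with the values shown being illustrative only; these settings normally live in hbase-site.xml on the region servers, so setting them in a client-side Configuration does not change a running cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys behind the minFilesToCompact / maxFilesToCompact / ratio values
    // that CompactionConfiguration logs when a store opens.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // A 128 KB flush size like the one flagged by TableDescriptorChecker below.
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
    System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
  }
}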
2024-12-06T10:19:19,134 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:19,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-06T10:19:19,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 in 168 msec 2024-12-06T10:19:19,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-06T10:19:19,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, ASSIGN in 322 msec 2024-12-06T10:19:19,137 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:19:19,137 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480359137"}]},"ts":"1733480359137"} 2024-12-06T10:19:19,138 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T10:19:19,141 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:19:19,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-06T10:19:20,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T10:19:20,095 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-06T10:19:20,097 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2209c520 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5765d46a 2024-12-06T10:19:20,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d9954b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:20,103 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:20,104 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:20,105 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:19:20,106 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56822, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:19:20,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T10:19:20,107 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:19:20,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:20,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742444_1620 (size=999) 2024-12-06T10:19:20,517 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-06T10:19:20,517 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-06T10:19:20,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:19:20,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, REOPEN/MOVE}] 2024-12-06T10:19:20,521 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, REOPEN/MOVE 2024-12-06T10:19:20,521 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:20,522 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:19:20,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:19:20,673 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:20,674 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,674 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:19:20,674 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing 7b4c4e64c91c50413b4c0cd97a01bcb8, disabling compactions & flushes 2024-12-06T10:19:20,674 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:20,674 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:20,674 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. after waiting 0 ms 2024-12-06T10:19:20,674 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
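The modify issued above switches family A to a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') while leaving B and C unchanged, which is why the region is being closed here and reopened below. A comparable change could be made from a client roughly as follows; a hedged sketch assuming the HBase 2.x Admin API, with only the table name, family name and MOB threshold taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Start from the currently deployed definition of family A ...
      ColumnFamilyDescriptor current =
          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
      // ... and switch it to a MOB-enabled family with a tiny threshold,
      // matching IS_MOB => 'true', MOB_THRESHOLD => '4' in the logged modify.
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(current)
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      // The master then updates the table descriptor and reopens the region,
      // much like the modify/reopen procedures logged here.
      admin.modifyColumnFamily(table, mobA);
    }
  }
}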
2024-12-06T10:19:20,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-06T10:19:20,678 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:20,678 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:20,678 WARN [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: 7b4c4e64c91c50413b4c0cd97a01bcb8 to self. 2024-12-06T10:19:20,679 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,679 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=CLOSED 2024-12-06T10:19:20,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-06T10:19:20,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 in 158 msec 2024-12-06T10:19:20,681 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, REOPEN/MOVE; state=CLOSED, location=552d6a33fa09,33397,1733480204743; forceNewPlan=false, retain=true 2024-12-06T10:19:20,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=OPENING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:20,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:19:20,985 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:20,987 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:20,987 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:19:20,987 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,987 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:19:20,987 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,987 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,988 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,989 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:19:20,989 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4c4e64c91c50413b4c0cd97a01bcb8 columnFamilyName A 2024-12-06T10:19:20,990 DEBUG [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:20,990 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(327): Store=7b4c4e64c91c50413b4c0cd97a01bcb8/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:19:20,991 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,991 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:19:20,991 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4c4e64c91c50413b4c0cd97a01bcb8 columnFamilyName B 2024-12-06T10:19:20,991 DEBUG [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:20,991 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(327): Store=7b4c4e64c91c50413b4c0cd97a01bcb8/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:19:20,992 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,992 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T10:19:20,992 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4c4e64c91c50413b4c0cd97a01bcb8 columnFamilyName C 2024-12-06T10:19:20,992 DEBUG [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:20,993 INFO [StoreOpener-7b4c4e64c91c50413b4c0cd97a01bcb8-1 {}] regionserver.HStore(327): Store=7b4c4e64c91c50413b4c0cd97a01bcb8/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:19:20,993 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:20,994 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,994 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,995 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:19:20,996 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:20,997 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened 7b4c4e64c91c50413b4c0cd97a01bcb8; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71254886, jitterRate=0.06178054213523865}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:19:20,997 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:20,998 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., pid=162, masterSystemTime=1733480360984 2024-12-06T10:19:20,999 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:20,999 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
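Below, the test opens a batch of client connections, asks the master to flush TestAcidGuarantees (pid=163) and starts writing; while the region's memstore is over its small limit (512 KB, per the warnings that follow), puts are rejected with RegionTooBusyException and retried. Issued from a standalone client, that flush-plus-write sequence would look roughly like this sketch, assuming the standard HBase 2.x API; the row and column values are illustrative and borrowed from the keys that appear in the flush output further down.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(name)) {
      // Equivalent of the "flush TestAcidGuarantees" request handled by
      // HMaster/FlushTableProcedure further down in the log.
      admin.flush(name);
      // Writes like this are what the concurrent test threads issue; while the
      // region is over its memstore limit they come back as RegionTooBusyException
      // (see the warnings below) and the client retries them.
      table.put(new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")));
    }
  }
}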
2024-12-06T10:19:20,999 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=OPEN, openSeqNum=5, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-12-06T10:19:21,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 in 167 msec 2024-12-06T10:19:21,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-06T10:19:21,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, REOPEN/MOVE in 481 msec 2024-12-06T10:19:21,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-06T10:19:21,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-12-06T10:19:21,005 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 898 msec 2024-12-06T10:19:21,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-06T10:19:21,007 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x537a66f8 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ac53e79 2024-12-06T10:19:21,011 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5efb7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,012 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-12-06T10:19:21,015 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,015 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-12-06T10:19:21,018 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,019 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 
to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-12-06T10:19:21,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,026 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-12-06T10:19:21,033 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e757135 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f6a59e4 2024-12-06T10:19:21,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d836f78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,039 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-12-06T10:19:21,044 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,045 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-12-06T10:19:21,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,049 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-12-06T10:19:21,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,054 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:61610 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-12-06T10:19:21,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:19:21,068 DEBUG [hconnection-0x665bf80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,068 DEBUG [hconnection-0x499711c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,069 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56944, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,069 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,071 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:21,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-06T10:19:21,072 DEBUG [hconnection-0x3310a69c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,072 DEBUG [hconnection-0x459d5d0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T10:19:21,073 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,073 DEBUG [hconnection-0x731016eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,073 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,074 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,074 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:21,074 DEBUG [hconnection-0x7e3f4059-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,075 DEBUG 
[hconnection-0x5fbebee6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,075 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,076 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:21,076 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:21,076 DEBUG [hconnection-0x5920e9df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,077 DEBUG [hconnection-0x7306aa7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,077 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,077 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,079 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,082 DEBUG [hconnection-0x14f3e774-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:19:21,084 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:19:21,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:21,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T10:19:21,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:21,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:21,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:21,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:21,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:21,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:21,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480421113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480421117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480421117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480421120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480421120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f32e33bc489b4f30841b9d396d8f0f44_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480361079/Put/seqid=0 2024-12-06T10:19:21,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742445_1621 (size=12154) 2024-12-06T10:19:21,147 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:21,152 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f32e33bc489b4f30841b9d396d8f0f44_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206f32e33bc489b4f30841b9d396d8f0f44_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:21,153 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/f1b5dbfc6b944a43a26c5c8b5e6d5632, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:21,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/f1b5dbfc6b944a43a26c5c8b5e6d5632 is 175, key is test_row_0/A:col10/1733480361079/Put/seqid=0 2024-12-06T10:19:21,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742446_1622 (size=30955) 2024-12-06T10:19:21,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T10:19:21,174 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/f1b5dbfc6b944a43a26c5c8b5e6d5632 2024-12-06T10:19:21,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/965fbea9298740cc869b9a4b02d4a093 is 50, key is test_row_0/B:col10/1733480361079/Put/seqid=0 2024-12-06T10:19:21,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480421221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480421222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480421224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480421224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480421224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,228 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T10:19:21,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:21,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:21,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:21,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:21,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:21,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:21,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742447_1623 (size=12001) 2024-12-06T10:19:21,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/965fbea9298740cc869b9a4b02d4a093 2024-12-06T10:19:21,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/26da14df03c44b82aad240b4260275fa is 50, key is test_row_0/C:col10/1733480361079/Put/seqid=0 2024-12-06T10:19:21,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742448_1624 (size=12001) 2024-12-06T10:19:21,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/26da14df03c44b82aad240b4260275fa 2024-12-06T10:19:21,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/f1b5dbfc6b944a43a26c5c8b5e6d5632 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632 2024-12-06T10:19:21,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632, entries=150, sequenceid=17, filesize=30.2 K 2024-12-06T10:19:21,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/965fbea9298740cc869b9a4b02d4a093 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/965fbea9298740cc869b9a4b02d4a093 2024-12-06T10:19:21,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/965fbea9298740cc869b9a4b02d4a093, entries=150, sequenceid=17, filesize=11.7 K 2024-12-06T10:19:21,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/26da14df03c44b82aad240b4260275fa as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/26da14df03c44b82aad240b4260275fa 2024-12-06T10:19:21,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/26da14df03c44b82aad240b4260275fa, entries=150, sequenceid=17, filesize=11.7 K 2024-12-06T10:19:21,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 225ms, sequenceid=17, compaction requested=false 2024-12-06T10:19:21,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:21,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T10:19:21,380 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T10:19:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:21,381 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T10:19:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:21,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:21,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:21,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:21,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206bfc6daba86e14a0787e51a10f3271b2a_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480361116/Put/seqid=0 2024-12-06T10:19:21,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:21,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:21,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742449_1625 (size=12154) 2024-12-06T10:19:21,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480421433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480421434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480421466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480421466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480421466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480421567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480421567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480421570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480421570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480421570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T10:19:21,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480421769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480421770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480421772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480421773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:21,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480421774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:21,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:21,849 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206bfc6daba86e14a0787e51a10f3271b2a_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206bfc6daba86e14a0787e51a10f3271b2a_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:21,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/9246c144fff44629bafdda6a7387eef4, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:21,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/9246c144fff44629bafdda6a7387eef4 is 175, key is test_row_0/A:col10/1733480361116/Put/seqid=0 2024-12-06T10:19:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742450_1626 (size=30955) 2024-12-06T10:19:22,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480422074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480422074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480422076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480422076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480422076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T10:19:22,263 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/9246c144fff44629bafdda6a7387eef4 2024-12-06T10:19:22,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/74ff1341db0640b38fbace278747a395 is 50, key is test_row_0/B:col10/1733480361116/Put/seqid=0 2024-12-06T10:19:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742451_1627 (size=12001) 2024-12-06T10:19:22,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480422579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480422579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480422580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480422582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480422583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:22,674 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/74ff1341db0640b38fbace278747a395 2024-12-06T10:19:22,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/31b58417249a4cf3951d50eceabff774 is 50, key is test_row_0/C:col10/1733480361116/Put/seqid=0 2024-12-06T10:19:22,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742452_1628 (size=12001) 2024-12-06T10:19:22,752 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T10:19:23,097 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/31b58417249a4cf3951d50eceabff774 2024-12-06T10:19:23,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/9246c144fff44629bafdda6a7387eef4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4 2024-12-06T10:19:23,104 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4, entries=150, sequenceid=41, filesize=30.2 K 2024-12-06T10:19:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/74ff1341db0640b38fbace278747a395 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/74ff1341db0640b38fbace278747a395 2024-12-06T10:19:23,109 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/74ff1341db0640b38fbace278747a395, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T10:19:23,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/31b58417249a4cf3951d50eceabff774 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/31b58417249a4cf3951d50eceabff774 2024-12-06T10:19:23,113 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/31b58417249a4cf3951d50eceabff774, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T10:19:23,114 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1733ms, sequenceid=41, compaction requested=false 2024-12-06T10:19:23,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:23,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:23,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-06T10:19:23,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-06T10:19:23,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-06T10:19:23,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0390 sec 2024-12-06T10:19:23,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.0460 sec 2024-12-06T10:19:23,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T10:19:23,177 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-06T10:19:23,178 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:23,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-06T10:19:23,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T10:19:23,180 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:23,180 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:23,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T10:19:23,332 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:23,333 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:23,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d1f20d87b88f462db801c821ee8b8cd9_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480361437/Put/seqid=0 2024-12-06T10:19:23,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742453_1629 (size=12154) 2024-12-06T10:19:23,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T10:19:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:23,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:23,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480423601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480423601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480423602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480423603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480423604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480423706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480423706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480423706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480423707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480423709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:23,754 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d1f20d87b88f462db801c821ee8b8cd9_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d1f20d87b88f462db801c821ee8b8cd9_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:23,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/23ef566befe84409b1bdb3489884c46d, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:23,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/23ef566befe84409b1bdb3489884c46d is 175, key is test_row_0/A:col10/1733480361437/Put/seqid=0 2024-12-06T10:19:23,766 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742454_1630 (size=30955) 2024-12-06T10:19:23,772 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/23ef566befe84409b1bdb3489884c46d 2024-12-06T10:19:23,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/2acf90690380429ca7d7a68e703e1dd7 is 50, key is test_row_0/B:col10/1733480361437/Put/seqid=0 2024-12-06T10:19:23,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T10:19:23,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742455_1631 (size=12001) 2024-12-06T10:19:23,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480423909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480423909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480423910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480423910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480423912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,184 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/2acf90690380429ca7d7a68e703e1dd7 2024-12-06T10:19:24,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/b57c8b92a4724f54b2c02477712d692b is 50, key is test_row_0/C:col10/1733480361437/Put/seqid=0 2024-12-06T10:19:24,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742456_1632 (size=12001) 2024-12-06T10:19:24,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480424212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480424213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480424213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480424214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480424215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T10:19:24,594 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/b57c8b92a4724f54b2c02477712d692b 2024-12-06T10:19:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/23ef566befe84409b1bdb3489884c46d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d 2024-12-06T10:19:24,601 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d, entries=150, sequenceid=55, filesize=30.2 K 2024-12-06T10:19:24,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/2acf90690380429ca7d7a68e703e1dd7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/2acf90690380429ca7d7a68e703e1dd7 2024-12-06T10:19:24,606 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/2acf90690380429ca7d7a68e703e1dd7, entries=150, sequenceid=55, filesize=11.7 K 2024-12-06T10:19:24,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/b57c8b92a4724f54b2c02477712d692b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/b57c8b92a4724f54b2c02477712d692b 2024-12-06T10:19:24,609 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/b57c8b92a4724f54b2c02477712d692b, entries=150, sequenceid=55, filesize=11.7 K 2024-12-06T10:19:24,610 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1277ms, sequenceid=55, compaction requested=true 2024-12-06T10:19:24,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:24,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
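[Editor's note] The records above and below show the two mechanisms interacting in this excerpt: the memstore of region 7b4c4e64c91c50413b4c0cd97a01bcb8 exceeds its blocking limit of 512.0 K, so checkResources() rejects incoming Mutate calls with RegionTooBusyException until the flush (pid=166) writes the A/B/C store files and frees memstore space. The sketch below is illustrative only and is not part of the test output; it assumes the HBase client library is on the classpath and shows, in plain Java, how a put against this table could back off on that exception, and which server-side settings bound the blocking limit (blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier). The concrete values and the manual retry loop are assumptions for illustration; the stock HBase client retries RegionTooBusyException on its own, which is consistent with the same connections reappearing above with increasing callIds and fresh deadlines.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side knobs behind the "Over memstore limit=512.0 K" message:
    // updates block once the memstore exceeds flush.size * block.multiplier.
    // 128 K * 4 = 512 K is an assumed combination; the actual settings used
    // by this test are not visible in the excerpt.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Explicit backoff loop purely for illustration; normally the client's
      // built-in retry policy handles RegionTooBusyException by itself.
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(100L * attempt); // wait for the flush to free memstore space
        }
      }
    }
  }
}

After the flush finishes at 10:19:24,610 ("Finished flush of dataSize ~73.80 KB"), writes resume, refill the memstore, and another flush plus a fresh round of RegionTooBusyException follows, a pattern that repeats through the rest of this excerpt.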
2024-12-06T10:19:24,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-06T10:19:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-06T10:19:24,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-06T10:19:24,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4300 sec 2024-12-06T10:19:24,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.4350 sec 2024-12-06T10:19:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:24,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T10:19:24,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:24,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:24,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:24,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:24,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:24,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:24,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120624fec424247c457095ae81b05841bab3_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:24,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742457_1633 (size=12154) 2024-12-06T10:19:24,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480424726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480424727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480424728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480424730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480424730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480424831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480424832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480424832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480424834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:24,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:24,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480424835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480425035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480425035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480425035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480425038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480425038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,129 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:25,133 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120624fec424247c457095ae81b05841bab3_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120624fec424247c457095ae81b05841bab3_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:25,136 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/7310a28789df41299915f976d0a4c078, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:25,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/7310a28789df41299915f976d0a4c078 is 175, key is test_row_0/A:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:25,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742458_1634 (size=30955) 2024-12-06T10:19:25,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=165 2024-12-06T10:19:25,284 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-06T10:19:25,285 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:25,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-06T10:19:25,287 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:25,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T10:19:25,287 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:25,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:25,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480425339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480425340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480425340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480425342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480425343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T10:19:25,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T10:19:25,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:25,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,567 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/7310a28789df41299915f976d0a4c078 2024-12-06T10:19:25,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/fce1ff2e5f4845d4b5c32a745525eab9 is 50, key is test_row_0/B:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:25,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742459_1635 (size=12001) 2024-12-06T10:19:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T10:19:25,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T10:19:25,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:25,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,745 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T10:19:25,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:25,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480425842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480425844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480425847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480425848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:25,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480425851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T10:19:25,898 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:25,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T10:19:25,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:25,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:25,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:25,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/fce1ff2e5f4845d4b5c32a745525eab9 2024-12-06T10:19:25,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/9836089d406e4b9bb99c0bab5cc6b064 is 50, key is test_row_0/C:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:25,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742460_1636 (size=12001) 2024-12-06T10:19:25,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/9836089d406e4b9bb99c0bab5cc6b064 2024-12-06T10:19:25,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/7310a28789df41299915f976d0a4c078 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078 2024-12-06T10:19:25,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078, entries=150, sequenceid=78, filesize=30.2 K 2024-12-06T10:19:25,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/fce1ff2e5f4845d4b5c32a745525eab9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fce1ff2e5f4845d4b5c32a745525eab9 2024-12-06T10:19:25,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fce1ff2e5f4845d4b5c32a745525eab9, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T10:19:25,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/9836089d406e4b9bb99c0bab5cc6b064 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9836089d406e4b9bb99c0bab5cc6b064 2024-12-06T10:19:26,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9836089d406e4b9bb99c0bab5cc6b064, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T10:19:26,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1285ms, sequenceid=78, compaction requested=true 2024-12-06T10:19:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:26,002 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:26,002 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:26,002 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:26,004 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123820 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:26,004 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:26,004 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:26,004 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:26,004 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:26,004 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:26,004 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=120.9 K 2024-12-06T10:19:26,004 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/965fbea9298740cc869b9a4b02d4a093, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/74ff1341db0640b38fbace278747a395, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/2acf90690380429ca7d7a68e703e1dd7, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fce1ff2e5f4845d4b5c32a745525eab9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=46.9 K 2024-12-06T10:19:26,004 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:26,004 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078] 2024-12-06T10:19:26,005 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 965fbea9298740cc869b9a4b02d4a093, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733480361079 2024-12-06T10:19:26,005 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1b5dbfc6b944a43a26c5c8b5e6d5632, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733480361079 2024-12-06T10:19:26,005 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 74ff1341db0640b38fbace278747a395, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733480361109 2024-12-06T10:19:26,005 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9246c144fff44629bafdda6a7387eef4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733480361109 2024-12-06T10:19:26,005 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 2acf90690380429ca7d7a68e703e1dd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733480361433 2024-12-06T10:19:26,006 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23ef566befe84409b1bdb3489884c46d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733480361433 2024-12-06T10:19:26,006 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting fce1ff2e5f4845d4b5c32a745525eab9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, 
earliestPutTs=1733480363600 2024-12-06T10:19:26,006 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7310a28789df41299915f976d0a4c078, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480363600 2024-12-06T10:19:26,014 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:26,017 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#543 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:26,018 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/318db86340304353b65bb18350242ce7 is 50, key is test_row_0/B:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:26,022 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206f4844c0c412e4a22be9483623401ef3a_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:26,023 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206f4844c0c412e4a22be9483623401ef3a_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:26,024 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f4844c0c412e4a22be9483623401ef3a_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:26,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742461_1637 (size=12139) 2024-12-06T10:19:26,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742462_1638 (size=4469) 2024-12-06T10:19:26,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:26,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T10:19:26,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:26,051 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-06T10:19:26,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A
2024-12-06T10:19:26,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B
2024-12-06T10:19:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C
2024-12-06T10:19:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:26,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412066049de116b494ccbb42600a872ebfd3e_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480364727/Put/seqid=0
2024-12-06T10:19:26,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742463_1639 (size=12154)
2024-12-06T10:19:26,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-06T10:19:26,432 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/318db86340304353b65bb18350242ce7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/318db86340304353b65bb18350242ce7
2024-12-06T10:19:26,436 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 318db86340304353b65bb18350242ce7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
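The pid=168 FlushRegionProcedure entries above are the region-server side of a client-requested table flush (a later entry shows the request as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). Purely as an illustration, and with the connection setup assumed rather than taken from the test code, such a flush is requested through the Admin API roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log that request
      // shows up as a FlushTableProcedure with FlushRegionProcedure children.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}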
2024-12-06T10:19:26,436 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:26,437 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=12, startTime=1733480366002; duration=0sec 2024-12-06T10:19:26,437 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:26,437 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:26,437 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:26,438 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#542 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:26,439 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/9401937f16034e12a4848ce8547897a2 is 175, key is test_row_0/A:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:26,440 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:26,440 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:26,440 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
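The compactions above are selected and scheduled by the region server itself (ExploringCompactionPolicy picked all four eligible C-family files); a client cannot force that exact selection, but it can request a compaction and poll until the servers report none running. A minimal sketch, with the helper class name and polling interval being assumptions:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;

public final class CompactionHelper {
  private CompactionHelper() {}

  // Queues a (minor) compaction of every store of the table and waits until the
  // region servers report that no compaction is running for it any more.
  public static void compactAndWait(Connection conn, TableName table) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.compact(table);
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);   // simple polling; a real test would put an upper bound on the wait
      }
    }
  }
}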
2024-12-06T10:19:26,440 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/26da14df03c44b82aad240b4260275fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/31b58417249a4cf3951d50eceabff774, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/b57c8b92a4724f54b2c02477712d692b, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9836089d406e4b9bb99c0bab5cc6b064] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=46.9 K 2024-12-06T10:19:26,448 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 26da14df03c44b82aad240b4260275fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733480361079 2024-12-06T10:19:26,448 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 31b58417249a4cf3951d50eceabff774, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733480361109 2024-12-06T10:19:26,449 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b57c8b92a4724f54b2c02477712d692b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733480361433 2024-12-06T10:19:26,449 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 9836089d406e4b9bb99c0bab5cc6b064, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480363600 2024-12-06T10:19:26,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742464_1640 (size=31093) 2024-12-06T10:19:26,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:26,471 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/9401937f16034e12a4848ce8547897a2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9401937f16034e12a4848ce8547897a2 2024-12-06T10:19:26,473 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412066049de116b494ccbb42600a872ebfd3e_7b4c4e64c91c50413b4c0cd97a01bcb8 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412066049de116b494ccbb42600a872ebfd3e_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:26,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/c1888d4732ea4881bcbed55ff89cb1ec, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:26,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/c1888d4732ea4881bcbed55ff89cb1ec is 175, key is test_row_0/A:col10/1733480364727/Put/seqid=0 2024-12-06T10:19:26,476 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 9401937f16034e12a4848ce8547897a2(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:26,476 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:26,477 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=12, startTime=1733480366002; duration=0sec 2024-12-06T10:19:26,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:26,477 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:26,479 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#545 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:26,479 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/74f58e385dc040fab52121c0ec493686 is 50, key is test_row_0/C:col10/1733480363603/Put/seqid=0 2024-12-06T10:19:26,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742465_1641 (size=30955) 2024-12-06T10:19:26,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742466_1642 (size=12139) 2024-12-06T10:19:26,492 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/c1888d4732ea4881bcbed55ff89cb1ec 2024-12-06T10:19:26,498 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/74f58e385dc040fab52121c0ec493686 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/74f58e385dc040fab52121c0ec493686 2024-12-06T10:19:26,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/4f0a94191a8e4f609bcb61b4bb0a196f is 50, key is test_row_0/B:col10/1733480364727/Put/seqid=0 2024-12-06T10:19:26,504 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 74f58e385dc040fab52121c0ec493686(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:26,504 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8:
2024-12-06T10:19:26,504 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=12, startTime=1733480366002; duration=0sec
2024-12-06T10:19:26,504 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:19:26,504 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C
2024-12-06T10:19:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742467_1643 (size=12001)
2024-12-06T10:19:26,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing
2024-12-06T10:19:26,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8
2024-12-06T10:19:26,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:19:26,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480426887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480426888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480426888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480426889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
2024-12-06T10:19:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480426889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:26,911 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/4f0a94191a8e4f609bcb61b4bb0a196f 2024-12-06T10:19:26,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/5c9538a0decd47a891fb13dc1b4f7663 is 50, key is test_row_0/C:col10/1733480364727/Put/seqid=0 2024-12-06T10:19:26,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742468_1644 (size=12001) 2024-12-06T10:19:26,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:26,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480426992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480426992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480426993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480426992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480426993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480427195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480427195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480427196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480427196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480427196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,320 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/5c9538a0decd47a891fb13dc1b4f7663 2024-12-06T10:19:27,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/c1888d4732ea4881bcbed55ff89cb1ec as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec 2024-12-06T10:19:27,328 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec, entries=150, sequenceid=91, filesize=30.2 K 2024-12-06T10:19:27,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/4f0a94191a8e4f609bcb61b4bb0a196f as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/4f0a94191a8e4f609bcb61b4bb0a196f 2024-12-06T10:19:27,335 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/4f0a94191a8e4f609bcb61b4bb0a196f, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:19:27,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/5c9538a0decd47a891fb13dc1b4f7663 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5c9538a0decd47a891fb13dc1b4f7663 2024-12-06T10:19:27,340 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5c9538a0decd47a891fb13dc1b4f7663, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T10:19:27,341 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1290ms, sequenceid=91, compaction requested=false 2024-12-06T10:19:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
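The RegionTooBusyException warnings in this part of the log are write back-pressure: puts are rejected while the region's memstore is over its blocking limit, and they are expected to be retried once the flush above drains it. The stock HBase client normally retries this exception itself (it is not a DoNotRetryIOException); purely to illustrate the contract, with the table name, retry count and backoff values being assumptions, a hand-rolled retry could look like:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPutExample {
  // Retries a single Put a few times when the region reports memstore pressure.
  // Depending on client retry settings the exception may instead surface wrapped
  // in a RetriesExhaustedException once the client's own retries run out.
  public static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;                    // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e; // give up after a few attempts
          Thread.sleep(backoffMs);   // wait for the flush to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}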
2024-12-06T10:19:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168
2024-12-06T10:19:27,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=168
2024-12-06T10:19:27,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167
2024-12-06T10:19:27,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0550 sec
2024-12-06T10:19:27,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.0580 sec
2024-12-06T10:19:27,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-06T10:19:27,391 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed
2024-12-06T10:19:27,392 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-06T10:19:27,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees
2024-12-06T10:19:27,393 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-06T10:19:27,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-12-06T10:19:27,394 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-06T10:19:27,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-06T10:19:27,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-12-06T10:19:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8
2024-12-06T10:19:27,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-06T10:19:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A
2024-12-06T10:19:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T10:19:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:27,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480427506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480427504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480427509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480427509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480427509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120660908016222f4f8596891169b5d91000_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:27,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742469_1645 (size=12154) 2024-12-06T10:19:27,520 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:27,524 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120660908016222f4f8596891169b5d91000_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120660908016222f4f8596891169b5d91000_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:27,525 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bf57646105824ae1be2c6a682a0918b4, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:27,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bf57646105824ae1be2c6a682a0918b4 is 175, key is test_row_0/A:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:27,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742470_1646 (size=30955) 2024-12-06T10:19:27,531 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bf57646105824ae1be2c6a682a0918b4 2024-12-06T10:19:27,546 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:27,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:27,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/ae007bc2044b4754aa8a9b81cab4b26d is 50, key is test_row_0/B:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:27,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742471_1647 (size=12001) 2024-12-06T10:19:27,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/ae007bc2044b4754aa8a9b81cab4b26d 2024-12-06T10:19:27,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/a2a3446154084f8cab590263e738b47f is 50, key is test_row_0/C:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:27,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742472_1648 (size=12001) 2024-12-06T10:19:27,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/a2a3446154084f8cab590263e738b47f 2024-12-06T10:19:27,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bf57646105824ae1be2c6a682a0918b4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4 2024-12-06T10:19:27,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4, entries=150, sequenceid=118, filesize=30.2 K 2024-12-06T10:19:27,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/ae007bc2044b4754aa8a9b81cab4b26d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/ae007bc2044b4754aa8a9b81cab4b26d 2024-12-06T10:19:27,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/ae007bc2044b4754aa8a9b81cab4b26d, entries=150, sequenceid=118, filesize=11.7 K 2024-12-06T10:19:27,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/a2a3446154084f8cab590263e738b47f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/a2a3446154084f8cab590263e738b47f 2024-12-06T10:19:27,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/a2a3446154084f8cab590263e738b47f, entries=150, sequenceid=118, filesize=11.7 K 2024-12-06T10:19:27,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 82ms, sequenceid=118, compaction requested=true 2024-12-06T10:19:27,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:27,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:27,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:27,582 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:27,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:27,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:27,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:27,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:27,583 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:27,584 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:27,584 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9401937f16034e12a4848ce8547897a2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=90.8 K 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:27,584 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,584 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9401937f16034e12a4848ce8547897a2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4] 2024-12-06T10:19:27,584 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/318db86340304353b65bb18350242ce7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/4f0a94191a8e4f609bcb61b4bb0a196f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/ae007bc2044b4754aa8a9b81cab4b26d] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=35.3 K 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 318db86340304353b65bb18350242ce7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480363600 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9401937f16034e12a4848ce8547897a2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480363600 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f0a94191a8e4f609bcb61b4bb0a196f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480364727 2024-12-06T10:19:27,584 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1888d4732ea4881bcbed55ff89cb1ec, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480364727 2024-12-06T10:19:27,585 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting ae007bc2044b4754aa8a9b81cab4b26d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480366885 2024-12-06T10:19:27,585 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf57646105824ae1be2c6a682a0918b4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480366885 2024-12-06T10:19:27,590 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:27,591 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#551 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:27,591 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/580a7d4d87f24596a104af357208de64 is 50, key is test_row_0/B:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:27,593 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120657b78cbd20784171ba1ccaf1a3403854_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:27,595 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120657b78cbd20784171ba1ccaf1a3403854_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:27,595 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120657b78cbd20784171ba1ccaf1a3403854_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:27,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742473_1649 (size=12241) 2024-12-06T10:19:27,601 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/580a7d4d87f24596a104af357208de64 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/580a7d4d87f24596a104af357208de64 2024-12-06T10:19:27,605 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 580a7d4d87f24596a104af357208de64(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:27,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:27,605 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=13, startTime=1733480367582; duration=0sec 2024-12-06T10:19:27,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:27,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:27,605 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:27,606 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:27,606 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:27,606 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,606 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/74f58e385dc040fab52121c0ec493686, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5c9538a0decd47a891fb13dc1b4f7663, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/a2a3446154084f8cab590263e738b47f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=35.3 K 2024-12-06T10:19:27,606 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 74f58e385dc040fab52121c0ec493686, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733480363600 2024-12-06T10:19:27,606 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c9538a0decd47a891fb13dc1b4f7663, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733480364727 2024-12-06T10:19:27,607 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting a2a3446154084f8cab590263e738b47f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480366885 2024-12-06T10:19:27,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested 
on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:27,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T10:19:27,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:27,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:27,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:27,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:27,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:27,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:27,614 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#553 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:27,614 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/47c087f5b34a4c91ae360ed1b1448e7c is 50, key is test_row_0/C:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:27,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742474_1650 (size=4469) 2024-12-06T10:19:27,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206790f2528d9724facb8a45b1c194da347_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480367504/Put/seqid=0 2024-12-06T10:19:27,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742476_1652 (size=12254) 2024-12-06T10:19:27,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742475_1651 (size=12241) 2024-12-06T10:19:27,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480427629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480427629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480427630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480427631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480427633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T10:19:27,698 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:27,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:27,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480427734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480427734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480427734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480427734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480427736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,851 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:27,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:27,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:27,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:27,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480427936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480427938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480427938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480427938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480427940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:27,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T10:19:28,003 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:28,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:28,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,018 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#552 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:28,019 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/a810c086858942feb968bad54b0f8fbe is 175, key is test_row_0/A:col10/1733480366888/Put/seqid=0 2024-12-06T10:19:28,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742477_1653 (size=31195) 2024-12-06T10:19:28,024 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:28,029 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206790f2528d9724facb8a45b1c194da347_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206790f2528d9724facb8a45b1c194da347_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:28,030 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/06d49e9a567e400cb249e98440c7f424, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:28,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/06d49e9a567e400cb249e98440c7f424 is 175, key is test_row_0/A:col10/1733480367504/Put/seqid=0 2024-12-06T10:19:28,031 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/47c087f5b34a4c91ae360ed1b1448e7c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/47c087f5b34a4c91ae360ed1b1448e7c 2024-12-06T10:19:28,036 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 47c087f5b34a4c91ae360ed1b1448e7c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:28,036 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:28,036 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=13, startTime=1733480367582; duration=0sec 2024-12-06T10:19:28,036 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:28,036 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C 2024-12-06T10:19:28,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742478_1654 (size=31055) 2024-12-06T10:19:28,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:28,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:28,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
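The pid=170 failures above all follow one pattern: the master keeps dispatching FlushRegionCallable for region 7b4c4e64c91c50413b4c0cd97a01bcb8 while MemStoreFlusher already has that region flushing, so the callable reports "Unable to complete flush" and the master re-dispatches the procedure. For orientation only, a minimal sketch, not taken from this test run, of the standard client call that requests such a table flush; the master then drives a flush procedure like pid=170 and retries it until the region server can take the flush:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only; not part of the test log. Requests a flush of every
// region of the table; a region that is already flushing rejects the remote
// callable (as seen above) and the master retries the procedure.
public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}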
2024-12-06T10:19:28,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480428240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480428242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480428245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480428245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480428245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,308 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:28,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:28,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
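The RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit; the stock HBase client normally retries such calls itself with backoff. The hypothetical helper below is not part of the test and only illustrates the shape of that retry against the public client API:

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper: retries a put with exponential backoff while the
// region is blocked on its memstore, mirroring the rejected Mutate calls above.
public final class BusyRegionRetry {
  private BusyRegionRetry() {}

  public static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long pauseMs = 100;
    for (int attempt = 1;; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        // "Over memstore limit=512.0 K": the region refuses writes until a flush drains it.
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(pauseMs);
        pauseMs = Math.min(pauseMs * 2, 10_000L);
      }
    }
  }
}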
2024-12-06T10:19:28,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,428 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/a810c086858942feb968bad54b0f8fbe as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a810c086858942feb968bad54b0f8fbe 2024-12-06T10:19:28,432 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into a810c086858942feb968bad54b0f8fbe(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:28,432 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:28,432 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=13, startTime=1733480367582; duration=0sec 2024-12-06T10:19:28,432 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:28,432 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:28,438 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/06d49e9a567e400cb249e98440c7f424 2024-12-06T10:19:28,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/de9ce77002464b3e9c6b336f77ec1476 is 50, key is test_row_0/B:col10/1733480367504/Put/seqid=0 2024-12-06T10:19:28,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742479_1655 (size=12101) 2024-12-06T10:19:28,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/de9ce77002464b3e9c6b336f77ec1476 2024-12-06T10:19:28,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/38e4e9970d35450ba2b3e842748e194c is 50, key is 
test_row_0/C:col10/1733480367504/Put/seqid=0 2024-12-06T10:19:28,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742480_1656 (size=12101) 2024-12-06T10:19:28,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/38e4e9970d35450ba2b3e842748e194c 2024-12-06T10:19:28,461 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:28,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:28,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:28,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
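The MemStoreFlusher commits recorded next drain the memstore that had been over the blocking limit. In HBase that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the excerpt only shows the resulting 512.0 K figure, so the values in this minimal sketch are placeholders, not the settings this test actually used:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Placeholder values only; the log shows the resulting 512.0 K limit, not the
// individual settings behind it.
public class MemstoreBlockingConfigSketch {
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    // A region starts rejecting writes with RegionTooBusyException once its
    // memstore exceeds flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // placeholder: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // placeholder: 4 * 128 K = 512 K
    return conf;
  }
}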
2024-12-06T10:19:28,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/06d49e9a567e400cb249e98440c7f424 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424 2024-12-06T10:19:28,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424, entries=150, sequenceid=133, filesize=30.3 K 2024-12-06T10:19:28,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/de9ce77002464b3e9c6b336f77ec1476 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de9ce77002464b3e9c6b336f77ec1476 2024-12-06T10:19:28,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de9ce77002464b3e9c6b336f77ec1476, entries=150, sequenceid=133, filesize=11.8 K 2024-12-06T10:19:28,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/38e4e9970d35450ba2b3e842748e194c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/38e4e9970d35450ba2b3e842748e194c 2024-12-06T10:19:28,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/38e4e9970d35450ba2b3e842748e194c, entries=150, sequenceid=133, filesize=11.8 K 2024-12-06T10:19:28,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 864ms, sequenceid=133, compaction requested=false 2024-12-06T10:19:28,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T10:19:28,614 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T10:19:28,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:28,615 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:19:28,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:28,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:28,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:28,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:28,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:28,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:28,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412061759df2738904bffaafa006c17772b36_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480367630/Put/seqid=0 2024-12-06T10:19:28,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742481_1657 (size=12304) 2024-12-06T10:19:28,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:28,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:28,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480428766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480428767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480428767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480428769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480428770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480428870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480428871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480428871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480428873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:28,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:28,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480428874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:29,037 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412061759df2738904bffaafa006c17772b36_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061759df2738904bffaafa006c17772b36_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:29,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cf86d8f05b4f48a898287bd08367c2c5, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:29,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cf86d8f05b4f48a898287bd08367c2c5 is 175, key is test_row_0/A:col10/1733480367630/Put/seqid=0 2024-12-06T10:19:29,044 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742482_1658 (size=31105) 2024-12-06T10:19:29,044 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cf86d8f05b4f48a898287bd08367c2c5 2024-12-06T10:19:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/bc8e69329e06421d9b3df9c5c67a61ab is 50, key is test_row_0/B:col10/1733480367630/Put/seqid=0 2024-12-06T10:19:29,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742483_1659 (size=12151) 2024-12-06T10:19:29,056 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/bc8e69329e06421d9b3df9c5c67a61ab 2024-12-06T10:19:29,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/706c0fcd9e3f4f269c9ab64704065d08 is 50, key is test_row_0/C:col10/1733480367630/Put/seqid=0 2024-12-06T10:19:29,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742484_1660 (size=12151) 2024-12-06T10:19:29,071 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/706c0fcd9e3f4f269c9ab64704065d08 2024-12-06T10:19:29,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480429073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480429073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480429074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cf86d8f05b4f48a898287bd08367c2c5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5 2024-12-06T10:19:29,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480429076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480429077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,081 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5, entries=150, sequenceid=157, filesize=30.4 K 2024-12-06T10:19:29,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/bc8e69329e06421d9b3df9c5c67a61ab as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/bc8e69329e06421d9b3df9c5c67a61ab 2024-12-06T10:19:29,085 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/bc8e69329e06421d9b3df9c5c67a61ab, entries=150, sequenceid=157, filesize=11.9 K 2024-12-06T10:19:29,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/706c0fcd9e3f4f269c9ab64704065d08 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/706c0fcd9e3f4f269c9ab64704065d08 2024-12-06T10:19:29,089 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/706c0fcd9e3f4f269c9ab64704065d08, entries=150, sequenceid=157, filesize=11.9 K 2024-12-06T10:19:29,090 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 476ms, 
sequenceid=157, compaction requested=true 2024-12-06T10:19:29,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:29,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:29,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-06T10:19:29,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-06T10:19:29,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-06T10:19:29,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6970 sec 2024-12-06T10:19:29,093 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.7000 sec 2024-12-06T10:19:29,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:29,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T10:19:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:29,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b9e2cbb3af304cecb8e2061b2a399023_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:29,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742485_1661 (size=14794) 2024-12-06T10:19:29,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480429395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480429396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480429396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480429397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480429399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T10:19:29,500 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-06T10:19:29,501 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-06T10:19:29,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480429501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,502 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T10:19:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480429502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480429501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,503 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:29,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480429503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:29,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480429505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T10:19:29,655 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:29,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:29,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:29,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:29,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:29,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480429704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480429704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480429704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480429705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:29,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480429707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,794 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:29,797 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b9e2cbb3af304cecb8e2061b2a399023_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b9e2cbb3af304cecb8e2061b2a399023_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:29,797 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bdddaaf26fc6429d87c4f91d6a43a28c, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:29,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bdddaaf26fc6429d87c4f91d6a43a28c is 175, key is test_row_0/A:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:29,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742486_1662 (size=39749) 2024-12-06T10:19:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T10:19:29,808 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:29,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:29,960 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:29,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:29,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480430007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480430007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480430008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480430008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480430012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T10:19:30,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:30,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:30,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,202 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bdddaaf26fc6429d87c4f91d6a43a28c 2024-12-06T10:19:30,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/0307124cc9eb4fe2828d172194dd5c7e is 50, key is test_row_0/B:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:30,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742487_1663 (size=12151) 2024-12-06T10:19:30,265 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,419 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:30,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:30,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480430511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480430512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480430512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480430513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:30,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480430516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,571 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:30,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:30,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T10:19:30,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/0307124cc9eb4fe2828d172194dd5c7e 2024-12-06T10:19:30,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/3ee830ee3c5f4809be4fe4f04ccbd42f is 50, key is test_row_0/C:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:30,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742488_1664 (size=12151) 2024-12-06T10:19:30,728 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:30,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:30,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:30,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:30,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:30,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:31,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/3ee830ee3c5f4809be4fe4f04ccbd42f 2024-12-06T10:19:31,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bdddaaf26fc6429d87c4f91d6a43a28c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c 2024-12-06T10:19:31,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c, entries=200, sequenceid=173, filesize=38.8 K 2024-12-06T10:19:31,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/0307124cc9eb4fe2828d172194dd5c7e as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/0307124cc9eb4fe2828d172194dd5c7e 2024-12-06T10:19:31,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/0307124cc9eb4fe2828d172194dd5c7e, entries=150, 
sequenceid=173, filesize=11.9 K 2024-12-06T10:19:31,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/3ee830ee3c5f4809be4fe4f04ccbd42f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/3ee830ee3c5f4809be4fe4f04ccbd42f 2024-12-06T10:19:31,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/3ee830ee3c5f4809be4fe4f04ccbd42f, entries=150, sequenceid=173, filesize=11.9 K 2024-12-06T10:19:31,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1660ms, sequenceid=173, compaction requested=true 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:31,039 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:31,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:31,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:31,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133104 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:31,040 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:31,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of 
size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:31,040 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,040 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:31,040 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,041 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/580a7d4d87f24596a104af357208de64, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de9ce77002464b3e9c6b336f77ec1476, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/bc8e69329e06421d9b3df9c5c67a61ab, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/0307124cc9eb4fe2828d172194dd5c7e] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=47.5 K 2024-12-06T10:19:31,041 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a810c086858942feb968bad54b0f8fbe, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=130.0 K 2024-12-06T10:19:31,041 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,041 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a810c086858942feb968bad54b0f8fbe, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c] 2024-12-06T10:19:31,041 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,042 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 580a7d4d87f24596a104af357208de64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480366885 2024-12-06T10:19:31,042 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting a810c086858942feb968bad54b0f8fbe, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480366885 2024-12-06T10:19:31,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
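The block above shows the master re-dispatching FlushRegionCallable for pid=172 over and over: each attempt lands while the MemStoreFlusher is still writing, the region answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master schedules another try. Only after the flusher finishes at 10:19:31,039 does the attempt dispatched at 10:19:31,042 proceed to flush all three column families. A minimal sketch of that retry-until-accepted pattern follows; FlushClient and FlushRejectedException are illustrative stand-ins, not the actual HBase procedure classes.

import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of a dispatcher that keeps re-sending a flush request
// until the region server stops reporting an in-progress flush. The names
// below (FlushClient, FlushRejectedException) are illustrative only.
public final class RetryingFlushDispatcher {

    /** Thrown by the sketch's client when the region is already flushing. */
    static final class FlushRejectedException extends IOException {
        FlushRejectedException(String msg) { super(msg); }
    }

    /** Minimal stand-in for the remote flush procedure call. */
    interface FlushClient {
        void flushRegion(String encodedRegionName) throws IOException;
    }

    static void flushWithRetry(FlushClient client, String region,
                               long backoffMs, int maxAttempts)
            throws IOException, InterruptedException {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                client.flushRegion(region);
                return; // flush accepted and completed
            } catch (FlushRejectedException e) {
                // Region reported an in-progress flush; back off and retry,
                // mirroring the repeated pid=172 dispatches in the log.
                TimeUnit.MILLISECONDS.sleep(backoffMs);
            }
        }
        throw new IOException("Flush not accepted after " + maxAttempts + " attempts");
    }
}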
2024-12-06T10:19:31,042 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T10:19:31,042 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06d49e9a567e400cb249e98440c7f424, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733480367504 2024-12-06T10:19:31,042 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting de9ce77002464b3e9c6b336f77ec1476, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733480367504 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:31,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:31,042 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting bc8e69329e06421d9b3df9c5c67a61ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733480367627 2024-12-06T10:19:31,043 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf86d8f05b4f48a898287bd08367c2c5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733480367627 2024-12-06T10:19:31,043 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 0307124cc9eb4fe2828d172194dd5c7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480368766 2024-12-06T10:19:31,043 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdddaaf26fc6429d87c4f91d6a43a28c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480368762 2024-12-06T10:19:31,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067713126145f344549b384c37d304998d_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is 
test_row_0/A:col10/1733480369398/Put/seqid=0 2024-12-06T10:19:31,072 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:31,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742489_1665 (size=12304) 2024-12-06T10:19:31,078 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#565 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:31,079 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/5df2647ef06344fbbea96e1bbbbd4db9 is 50, key is test_row_0/B:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:31,084 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206c2ec59db383b489083f6f57499e8a23d_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:31,087 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206c2ec59db383b489083f6f57499e8a23d_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:31,087 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c2ec59db383b489083f6f57499e8a23d_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:31,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742491_1667 (size=4469) 2024-12-06T10:19:31,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742490_1666 (size=12527) 2024-12-06T10:19:31,110 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/5df2647ef06344fbbea96e1bbbbd4db9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5df2647ef06344fbbea96e1bbbbd4db9 2024-12-06T10:19:31,114 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 5df2647ef06344fbbea96e1bbbbd4db9(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:31,114 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:31,114 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=12, startTime=1733480371039; duration=0sec 2024-12-06T10:19:31,114 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:31,114 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:31,114 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:31,115 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:31,115 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:31,115 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
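The compaction selections above ("Exploring compaction algorithm has selected 4 files of size 48644 ... with 3 in ratio") come down to a size-ratio test over contiguous windows of store files: a window qualifies only when each file is no larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files in the window, and among qualifying windows the policy prefers more files and then a smaller total rewrite. The sketch below is a simplified stand-in for that rule, not the real ExploringCompactionPolicy; the sizes in main() only approximate the four B-family files from the log.

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of ratio-based compaction selection: scan contiguous
// windows of store-file sizes, keep only windows where each file is at most
// `ratio` times the sum of the other files, and prefer the window that
// compacts the most files for the least total size.
public final class CompactionSelectionSketch {

    static boolean filesInRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                List<Long> window = sizes.subList(start, end);
                if (!filesInRatio(window, ratio)) {
                    continue;
                }
                long size = window.stream().mapToLong(Long::longValue).sum();
                // Prefer more files, then a smaller total rewrite cost.
                if (window.size() > best.size()
                        || (window.size() == best.size() && size < bestSize)) {
                    best = new ArrayList<>(window);
                    bestSize = size;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Roughly the four B-family file sizes from the log
        // (12.0 K, 11.8 K, 11.9 K, 11.9 K), in bytes.
        List<Long> sizes = List.of(12288L, 12083L, 12185L, 12185L);
        System.out.println(select(sizes, 3, 10, 1.2)); // all four are in ratio
    }
}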
2024-12-06T10:19:31,116 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/47c087f5b34a4c91ae360ed1b1448e7c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/38e4e9970d35450ba2b3e842748e194c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/706c0fcd9e3f4f269c9ab64704065d08, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/3ee830ee3c5f4809be4fe4f04ccbd42f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=47.5 K 2024-12-06T10:19:31,116 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 47c087f5b34a4c91ae360ed1b1448e7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480366885 2024-12-06T10:19:31,116 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e4e9970d35450ba2b3e842748e194c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733480367504 2024-12-06T10:19:31,116 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 706c0fcd9e3f4f269c9ab64704065d08, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733480367627 2024-12-06T10:19:31,117 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ee830ee3c5f4809be4fe4f04ccbd42f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480368766 2024-12-06T10:19:31,123 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#566 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:31,124 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/8219808a4bba4ad1955ed274539e28b3 is 50, key is test_row_0/C:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:31,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742492_1668 (size=12527) 2024-12-06T10:19:31,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:31,479 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067713126145f344549b384c37d304998d_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067713126145f344549b384c37d304998d_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:31,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/45c5720d7e9e4bca974cb5892f5c5a26, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:31,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/45c5720d7e9e4bca974cb5892f5c5a26 is 175, key is test_row_0/A:col10/1733480369398/Put/seqid=0 2024-12-06T10:19:31,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742493_1669 (size=31105) 2024-12-06T10:19:31,484 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/45c5720d7e9e4bca974cb5892f5c5a26 2024-12-06T10:19:31,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/03788a449d14443fbb68dfd672955f1d is 50, key is test_row_0/B:col10/1733480369398/Put/seqid=0 2024-12-06T10:19:31,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37093 is added to blk_1073742494_1670 (size=12151) 2024-12-06T10:19:31,504 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#564 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:31,504 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/e56e88d4849a446b93e0d1891c9826e8 is 175, key is test_row_0/A:col10/1733480369378/Put/seqid=0 2024-12-06T10:19:31,505 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/03788a449d14443fbb68dfd672955f1d 2024-12-06T10:19:31,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742495_1671 (size=31481) 2024-12-06T10:19:31,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/d01c348c72094819888d72a376ace071 is 50, key is test_row_0/C:col10/1733480369398/Put/seqid=0 2024-12-06T10:19:31,516 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/e56e88d4849a446b93e0d1891c9826e8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e56e88d4849a446b93e0d1891c9826e8 2024-12-06T10:19:31,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:31,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:31,524 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into e56e88d4849a446b93e0d1891c9826e8(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:31,524 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:31,524 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=12, startTime=1733480371039; duration=0sec 2024-12-06T10:19:31,525 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:31,525 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:31,532 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/8219808a4bba4ad1955ed274539e28b3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/8219808a4bba4ad1955ed274539e28b3 2024-12-06T10:19:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480431527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480431528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742496_1672 (size=12151) 2024-12-06T10:19:31,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480431531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,534 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/d01c348c72094819888d72a376ace071 2024-12-06T10:19:31,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480431532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480431533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,537 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 8219808a4bba4ad1955ed274539e28b3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
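Note (not part of the captured log): the RegionTooBusyException warnings around this point are the region server rejecting writes while the region's memstore is above its blocking limit; callers are expected to back off and retry. Below is a minimal client-side sketch of that pattern, assuming an hbase-site.xml on the classpath that points at the cluster. The table, row, family and qualifier names mirror the log; the retry count and backoff are arbitrary, and whether RegionTooBusyException surfaces to this catch block (rather than being absorbed by the HBase client's own retries) depends on client retry settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same row/family/qualifier shape as the puts being rejected in the log.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);               // the HBase client also retries internally
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                    // give up after a few attempts
              }
              Thread.sleep(100L * attempt); // simple linear backoff while flushes catch up
            }
          }
        }
      }
    }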
2024-12-06T10:19:31,537 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:31,537 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=12, startTime=1733480371039; duration=0sec 2024-12-06T10:19:31,537 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:31,537 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C 2024-12-06T10:19:31,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/45c5720d7e9e4bca974cb5892f5c5a26 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26 2024-12-06T10:19:31,543 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26, entries=150, sequenceid=193, filesize=30.4 K 2024-12-06T10:19:31,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/03788a449d14443fbb68dfd672955f1d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/03788a449d14443fbb68dfd672955f1d 2024-12-06T10:19:31,546 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/03788a449d14443fbb68dfd672955f1d, entries=150, sequenceid=193, filesize=11.9 K 2024-12-06T10:19:31,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/d01c348c72094819888d72a376ace071 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/d01c348c72094819888d72a376ace071 2024-12-06T10:19:31,550 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/d01c348c72094819888d72a376ace071, entries=150, sequenceid=193, filesize=11.9 K 2024-12-06T10:19:31,551 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 509ms, sequenceid=193, compaction requested=false 2024-12-06T10:19:31,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:31,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-06T10:19:31,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-06T10:19:31,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-06T10:19:31,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0490 sec 2024-12-06T10:19:31,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.0520 sec 2024-12-06T10:19:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T10:19:31,606 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-06T10:19:31,607 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:31,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-06T10:19:31,609 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T10:19:31,609 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:31,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:31,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:31,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-06T10:19:31,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:31,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:31,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:31,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:31,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:31,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:31,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069ccc8198519a4d418395bffba55ee895_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742497_1673 (size=14794) 2024-12-06T10:19:31,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480431649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480431652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480431653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480431654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480431654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T10:19:31,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480431755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480431758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,761 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480431759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T10:19:31,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480431759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480431759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:31,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
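Note (not part of the captured log): the FLUSH procedures above (procId 171 reported completed, procId 173/174 retried while the region is already flushing) are driven by an administrative flush of the table. A minimal sketch of the corresponding client call follows, assuming cluster configuration on the classpath; only the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master and waits for it to finish,
          // i.e. the "Operation: FLUSH ... completed" lines seen in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }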
2024-12-06T10:19:31,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:31,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T10:19:31,914 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T10:19:31,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:31,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:31,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:31,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:31,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
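Note (not part of the captured log): the repeated "Over memstore limit=512.0 K" rejections come from the per-region blocking threshold, which is the memstore flush size multiplied by the block multiplier; the test evidently runs with a very small flush size, so the 512 K ceiling is reached quickly under load. The sketch below only illustrates that arithmetic; the flush size and multiplier values are hypothetical and are not taken from the test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values chosen so that flush size * multiplier = 512 K,
        // matching the limit reported in the log; real deployments use far larger sizes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Writes to a region are rejected with RegionTooBusyException once its
        // memstore grows past this product, until flushes bring it back down.
        System.out.println("blocking limit (bytes) = " + blockingLimit);
      }
    }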
2024-12-06T10:19:31,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:31,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480431965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480431965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480431965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480431965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:31,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:31,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480431965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,048 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:32,055 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069ccc8198519a4d418395bffba55ee895_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069ccc8198519a4d418395bffba55ee895_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:32,056 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/3780fdb5d5294bbebeb7ffeca248c8d9, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:32,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/3780fdb5d5294bbebeb7ffeca248c8d9 is 175, key is test_row_0/A:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:32,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure 
class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T10:19:32,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:32,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:32,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742498_1674 (size=39749) 2024-12-06T10:19:32,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T10:19:32,219 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T10:19:32,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:32,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:32,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:32,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480432267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480432268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480432269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480432269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480432271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T10:19:32,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:32,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:32,474 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/3780fdb5d5294bbebeb7ffeca248c8d9 2024-12-06T10:19:32,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/b752fe9f0506465d86ff8015d180eca4 is 50, key is test_row_0/B:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:32,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742499_1675 (size=12151) 2024-12-06T10:19:32,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/b752fe9f0506465d86ff8015d180eca4 2024-12-06T10:19:32,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4140854a20a84db993ea0e74a123d79a is 50, key is test_row_0/C:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:32,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742500_1676 (size=12151) 2024-12-06T10:19:32,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=214 (bloomFilter=true), 
to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4140854a20a84db993ea0e74a123d79a 2024-12-06T10:19:32,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/3780fdb5d5294bbebeb7ffeca248c8d9 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9 2024-12-06T10:19:32,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9, entries=200, sequenceid=214, filesize=38.8 K 2024-12-06T10:19:32,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/b752fe9f0506465d86ff8015d180eca4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b752fe9f0506465d86ff8015d180eca4 2024-12-06T10:19:32,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b752fe9f0506465d86ff8015d180eca4, entries=150, sequenceid=214, filesize=11.9 K 2024-12-06T10:19:32,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4140854a20a84db993ea0e74a123d79a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4140854a20a84db993ea0e74a123d79a 2024-12-06T10:19:32,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4140854a20a84db993ea0e74a123d79a, entries=150, sequenceid=214, filesize=11.9 K 2024-12-06T10:19:32,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 870ms, sequenceid=214, compaction requested=true 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark 
for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:32,507 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:32,507 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:32,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102335 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:32,508 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,508 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:32,508 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5df2647ef06344fbbea96e1bbbbd4db9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/03788a449d14443fbb68dfd672955f1d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b752fe9f0506465d86ff8015d180eca4] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=36.0 K 2024-12-06T10:19:32,508 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e56e88d4849a446b93e0d1891c9826e8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=99.9 K 2024-12-06T10:19:32,508 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e56e88d4849a446b93e0d1891c9826e8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9] 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 5df2647ef06344fbbea96e1bbbbd4db9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480368766 2024-12-06T10:19:32,508 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e56e88d4849a446b93e0d1891c9826e8, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480368766 2024-12-06T10:19:32,509 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 03788a449d14443fbb68dfd672955f1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733480369391 2024-12-06T10:19:32,509 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45c5720d7e9e4bca974cb5892f5c5a26, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733480369391 2024-12-06T10:19:32,509 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b752fe9f0506465d86ff8015d180eca4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480371530 2024-12-06T10:19:32,509 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3780fdb5d5294bbebeb7ffeca248c8d9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480371530 2024-12-06T10:19:32,515 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:32,516 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#573 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:32,517 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/fe7ea649e15b47b49d421cd56bd28eda is 50, key is test_row_0/B:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:32,519 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206c4d63ea2215d42ce801b8074f7ce5fcb_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:32,521 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206c4d63ea2215d42ce801b8074f7ce5fcb_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:32,521 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c4d63ea2215d42ce801b8074f7ce5fcb_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:32,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742501_1677 (size=12629) 2024-12-06T10:19:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742502_1678 (size=4469) 2024-12-06T10:19:32,526 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#572 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:32,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,527 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/a9582847497a4644afa56a1c46390610 is 175, key is test_row_0/A:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:32,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T10:19:32,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:32,527 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-06T10:19:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:32,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742503_1679 (size=31583) 2024-12-06T10:19:32,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206571ee31ebd384460a73f8652ef705cec_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480371653/Put/seqid=0 2024-12-06T10:19:32,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742504_1680 (size=12304) 2024-12-06T10:19:32,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:32,553 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206571ee31ebd384460a73f8652ef705cec_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206571ee31ebd384460a73f8652ef705cec_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:32,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/04d7ff1a217a4c40ae67cda6ecf35f63, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:32,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/04d7ff1a217a4c40ae67cda6ecf35f63 is 175, key is test_row_0/A:col10/1733480371653/Put/seqid=0 2024-12-06T10:19:32,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742505_1681 (size=31105) 2024-12-06T10:19:32,581 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/04d7ff1a217a4c40ae67cda6ecf35f63 2024-12-06T10:19:32,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/3706886ff78d4d46b5ee9ac3923e3806 is 50, key is test_row_0/B:col10/1733480371653/Put/seqid=0 2024-12-06T10:19:32,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742506_1682 (size=12151) 2024-12-06T10:19:32,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T10:19:32,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:32,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480432786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480432786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480432829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480432829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480432829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,926 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/fe7ea649e15b47b49d421cd56bd28eda as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fe7ea649e15b47b49d421cd56bd28eda 2024-12-06T10:19:32,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480432930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,932 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into fe7ea649e15b47b49d421cd56bd28eda(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:32,932 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:32,932 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=13, startTime=1733480372507; duration=0sec 2024-12-06T10:19:32,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,932 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:32,932 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:32,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480432930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,932 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:32,933 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:32,933 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:32,933 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
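The records just above show the ExploringCompactionPolicy selecting all three eligible store files of 7b4c4e64c91c50413b4c0cd97a01bcb8/C (36829 bytes in total) and HStore starting the minor compaction. For reference, the same kind of compaction can also be requested explicitly through the public Admin API; the following is only a minimal sketch (the class name, the default connection configuration, and the choice between compact and majorCompact are assumptions, not part of this test run):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Illustrative sketch: ask the region servers to compact a table. */
public final class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Queues a compaction request for every region/store of the table; the server-side
      // compaction policy (as in the records above) still decides which files get merged.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
      // admin.majorCompact(TableName.valueOf("TestAcidGuarantees")); // rewrite all files into one
    }
  }
}
```

Note that Admin.compact only enqueues the request; selection and execution remain asynchronous on the region server, exactly as logged above.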
2024-12-06T10:19:32,933 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/8219808a4bba4ad1955ed274539e28b3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/d01c348c72094819888d72a376ace071, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4140854a20a84db993ea0e74a123d79a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=36.0 K 2024-12-06T10:19:32,934 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 8219808a4bba4ad1955ed274539e28b3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733480368766 2024-12-06T10:19:32,934 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting d01c348c72094819888d72a376ace071, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733480369391 2024-12-06T10:19:32,934 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4140854a20a84db993ea0e74a123d79a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480371530 2024-12-06T10:19:32,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480432933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:32,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480432934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480432934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:32,940 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#576 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:32,941 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/f1ef3f0c801147969459c3eb9fcbbca3 is 50, key is test_row_0/C:col10/1733480371530/Put/seqid=0 2024-12-06T10:19:32,945 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/a9582847497a4644afa56a1c46390610 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a9582847497a4644afa56a1c46390610 2024-12-06T10:19:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742507_1683 (size=12629) 2024-12-06T10:19:32,949 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into a9582847497a4644afa56a1c46390610(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
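The recurring WARN/DEBUG pairs in this stretch show the RPC handlers rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K"): the test writes faster than the in-flight flushes and compactions can drain the memstore, so HRegion.checkResources pushes back on the clients. The stock HBase client retries these failures internally; a caller driving its own writes would typically back off and retry roughly as sketched below (the class name, attempt count, backoff values and the literal cell value are assumptions; the table, row, family and qualifier names are taken from the surrounding records):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative sketch: retry a put with exponential backoff while the region is too busy. */
public final class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);          // may fail while the memstore is over its blocking limit
          return;                  // write accepted
        } catch (IOException e) {  // e.g. RegionTooBusyException: Over memstore limit=512.0 K
          Thread.sleep(backoffMs); // give the in-flight flush/compaction time to drain the memstore
          backoffMs *= 2;          // exponential backoff before the next attempt
        }
      }
      throw new IOException("region stayed too busy after 5 attempts");
    }
  }
}
```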
2024-12-06T10:19:32,949 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:32,949 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=13, startTime=1733480372507; duration=0sec 2024-12-06T10:19:32,949 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:32,949 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:32,954 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/f1ef3f0c801147969459c3eb9fcbbca3 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f1ef3f0c801147969459c3eb9fcbbca3 2024-12-06T10:19:32,958 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into f1ef3f0c801147969459c3eb9fcbbca3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:32,958 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:32,958 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=13, startTime=1733480372507; duration=0sec 2024-12-06T10:19:32,958 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:32,959 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C 2024-12-06T10:19:33,005 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/3706886ff78d4d46b5ee9ac3923e3806 2024-12-06T10:19:33,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/457ae07b6b6c43aa8d8b1c71109242bc is 50, key is test_row_0/C:col10/1733480371653/Put/seqid=0 2024-12-06T10:19:33,014 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742508_1684 (size=12151) 2024-12-06T10:19:33,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480433133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480433134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480433138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480433140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480433140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,415 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/457ae07b6b6c43aa8d8b1c71109242bc 2024-12-06T10:19:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/04d7ff1a217a4c40ae67cda6ecf35f63 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63 2024-12-06T10:19:33,423 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63, entries=150, sequenceid=233, filesize=30.4 K 2024-12-06T10:19:33,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/3706886ff78d4d46b5ee9ac3923e3806 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/3706886ff78d4d46b5ee9ac3923e3806 2024-12-06T10:19:33,427 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/3706886ff78d4d46b5ee9ac3923e3806, entries=150, sequenceid=233, filesize=11.9 K 2024-12-06T10:19:33,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/457ae07b6b6c43aa8d8b1c71109242bc as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/457ae07b6b6c43aa8d8b1c71109242bc 2024-12-06T10:19:33,430 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/457ae07b6b6c43aa8d8b1c71109242bc, entries=150, sequenceid=233, filesize=11.9 K 2024-12-06T10:19:33,431 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 904ms, sequenceid=233, compaction requested=false 2024-12-06T10:19:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-06T10:19:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-06T10:19:33,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-06T10:19:33,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8230 sec 2024-12-06T10:19:33,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.8270 sec 2024-12-06T10:19:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:33,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-06T10:19:33,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:33,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:33,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:33,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:33,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:33,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:33,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062fcf5d0990cb422cbcd5d2fea95029e3_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:33,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742509_1685 (size=12304) 2024-12-06T10:19:33,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480433451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480433452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480433453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480433454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480433454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480433555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480433556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480433556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480433557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480433557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T10:19:33,713 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-06T10:19:33,714 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-06T10:19:33,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:33,716 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:33,717 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:33,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:33,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480433757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480433758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480433759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480433760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480433760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:33,849 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:33,852 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062fcf5d0990cb422cbcd5d2fea95029e3_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062fcf5d0990cb422cbcd5d2fea95029e3_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:33,853 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/90e355b49e304873a4b47aa8c18f8724, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:33,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/90e355b49e304873a4b47aa8c18f8724 is 175, key is test_row_0/A:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:33,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added 
to blk_1073742510_1686 (size=31105) 2024-12-06T10:19:33,867 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:33,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:33,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:33,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:33,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:33,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:33,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
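
The dominant pattern in the entries above is RegionTooBusyException with "Over memstore limit=512.0 K": HRegion.checkResources rejects writes once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and keeps rejecting them until the in-progress flush (the MemStoreFlusher.0 entries above) drains the memstore. A 512 K blocking limit is far below the production default, so the test presumably shrinks the flush size, e.g. a 128 KiB flush size with the default multiplier of 4; the actual settings are not visible in this log. A minimal, hypothetical Java sketch of such a setup, assuming HBaseTestingUtility and assuming these two keys are the knobs involved:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class SmallMemstoreCluster {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    // Hypothetical values: 128 KiB flush size x default multiplier 4 = 512 K,
    // matching the blocking limit reported in the log. The real test's values
    // are not shown here.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    util.startMiniCluster();
    try {
      // Puts to a region whose memstore is over 512 K are now rejected with
      // RegionTooBusyException until the memstore is flushed.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
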
2024-12-06T10:19:33,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:34,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
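
The pid=175 FlushTableProcedure above (and the procId 173 that completed just before it) is the master-side counterpart of a client flush request: the client submits the flush and then polls the master, which is what the repeated "Checking to see if procedure is done pid=175" entries correspond to, while the master keeps re-dispatching the FlushRegionProcedure subtask (pid=176) to the region server for as long as the region answers "already flushing". A minimal sketch of the client-side call, assuming a standard HBase 2.x client on the test's default configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // In this build the call is backed by a master FlushTableProcedure and
      // returns once the procedure finishes, cf. the HBaseAdmin$TableFuture
      // "Operation: FLUSH ... completed" entry earlier in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
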
2024-12-06T10:19:34,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:34,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
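
Interleaved with the flush retries, the Mutate calls from the writer threads keep failing with RegionTooBusyException until the memstore drains. The standard client normally retries such calls internally (bounded by hbase.client.retries.number and hbase.client.pause) rather than surfacing them immediately; the sketch below makes that handling explicit purely for illustration, using the public client API, the table/row/family/qualifier names that appear in this log, and a dummy value:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Depending on retry settings the RegionTooBusyException may arrive
          // directly or wrapped; anything else is rethrown.
          boolean tooBusy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!tooBusy) {
            throw e;
          }
          // Region memstore is over its blocking limit; give the flush time
          // to make progress before retrying. Gives up silently in this sketch.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
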
2024-12-06T10:19:34,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480434061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480434068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480434068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480434068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480434069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,172 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,259 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/90e355b49e304873a4b47aa8c18f8724 2024-12-06T10:19:34,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7b8c40d6a1904399a6e16fe07c23133f is 50, key is test_row_0/B:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:34,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742511_1687 (size=12151) 2024-12-06T10:19:34,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:34,324 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:34,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,477 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480434566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480434571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480434573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480434574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:34,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480434575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,630 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:34,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7b8c40d6a1904399a6e16fe07c23133f 2024-12-06T10:19:34,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/e42225d60ec447118afbee16a149e5c8 is 50, key is test_row_0/C:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:34,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742512_1688 (size=12151) 2024-12-06T10:19:34,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:34,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:34,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:34,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:34,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:34,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:34,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:34,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:35,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/e42225d60ec447118afbee16a149e5c8 2024-12-06T10:19:35,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:35,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:35,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:35,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
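Note: the RegionTooBusyException warnings above are memstore backpressure. Mutate calls against region 7b4c4e64c91c50413b4c0cd97a01bcb8 are rejected with "Over memstore limit=512.0 K" because the region's memstore has grown past its blocking limit while the flush requested by pid=176 is still in progress. The HBase client already retries such calls internally (governed by hbase.client.retries.number and hbase.client.pause), so the Java sketch below is only a hypothetical illustration of an explicit backoff loop around Table.put; it reuses the table, family, qualifier, and row names seen in this log, while the class name, cell value, pause values, and attempt limit are made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" match the cells seen in the log; the value is a placeholder.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long pauseMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e;                      // give up after a fixed number of attempts
          }
          Thread.sleep(pauseMs);          // back off and let the in-flight flush finish
          pauseMs = Math.min(pauseMs * 2, 5000);
        }
      }
    }
  }
}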
2024-12-06T10:19:35,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:35,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:35,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/90e355b49e304873a4b47aa8c18f8724 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724 2024-12-06T10:19:35,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724, entries=150, sequenceid=255, filesize=30.4 K 2024-12-06T10:19:35,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7b8c40d6a1904399a6e16fe07c23133f as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7b8c40d6a1904399a6e16fe07c23133f 2024-12-06T10:19:35,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7b8c40d6a1904399a6e16fe07c23133f, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T10:19:35,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/e42225d60ec447118afbee16a149e5c8 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/e42225d60ec447118afbee16a149e5c8 2024-12-06T10:19:35,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/e42225d60ec447118afbee16a149e5c8, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T10:19:35,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1666ms, sequenceid=255, compaction requested=true 2024-12-06T10:19:35,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:35,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:35,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:35,105 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:35,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:35,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:35,105 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:35,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:35,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:35,105 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:35,105 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:35,106 INFO 
[RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:35,106 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:35,106 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fe7ea649e15b47b49d421cd56bd28eda, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/3706886ff78d4d46b5ee9ac3923e3806, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7b8c40d6a1904399a6e16fe07c23133f] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=36.1 K 2024-12-06T10:19:35,106 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a9582847497a4644afa56a1c46390610, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=91.6 K 2024-12-06T10:19:35,106 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a9582847497a4644afa56a1c46390610, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724] 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting fe7ea649e15b47b49d421cd56bd28eda, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480371530 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9582847497a4644afa56a1c46390610, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480371530 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 3706886ff78d4d46b5ee9ac3923e3806, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733480371648 2024-12-06T10:19:35,106 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04d7ff1a217a4c40ae67cda6ecf35f63, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733480371648 2024-12-06T10:19:35,107 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b8c40d6a1904399a6e16fe07c23133f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733480372782 2024-12-06T10:19:35,107 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90e355b49e304873a4b47aa8c18f8724, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733480372782 2024-12-06T10:19:35,113 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:35,114 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#581 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:35,114 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206d40daa96ab4749098462883aa93a5120_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:35,115 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/983efaefac5d4a3c8af447dc19681c1c is 50, key is test_row_0/B:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:35,116 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206d40daa96ab4749098462883aa93a5120_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:35,116 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d40daa96ab4749098462883aa93a5120_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:35,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742513_1689 (size=12731) 2024-12-06T10:19:35,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742514_1690 (size=4469) 2024-12-06T10:19:35,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
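Note: pid=176 is a flush procedure dispatched by the master. Each attempt above fails with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still working on the flush it already started, so the master keeps re-dispatching the FlushRegionCallable; the attempt dispatched just above finally proceeds (the "Flushing ... 3/3 column families" entry that follows). The log does not show who requested the flush; from client code a table flush like this is normally issued through the Admin API, as in the hypothetical sketch below (class name and connection setup are illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every region of the table. In this build (2.7.0-SNAPSHOT)
      // the request runs as a master-side procedure that dispatches a FlushRegionCallable
      // to the hosting region server, which is the pid=176 remote procedure in the log.
      // If the region is already flushing, the callable fails and the master retries it.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}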
2024-12-06T10:19:35,241 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:35,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e58bcfcce41f4114bcf73f56035a008d_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480373452/Put/seqid=0 2024-12-06T10:19:35,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742515_1691 (size=12454) 2024-12-06T10:19:35,523 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#582 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:35,524 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/4c07ff91e8464650a9f5892771ecd0a1 is 175, key is test_row_0/A:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:35,524 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/983efaefac5d4a3c8af447dc19681c1c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/983efaefac5d4a3c8af447dc19681c1c 2024-12-06T10:19:35,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742516_1692 (size=31685) 2024-12-06T10:19:35,528 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 983efaefac5d4a3c8af447dc19681c1c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:35,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:35,529 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=13, startTime=1733480375105; duration=0sec 2024-12-06T10:19:35,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:35,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:35,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:35,529 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:35,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:35,530 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:35,530 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f1ef3f0c801147969459c3eb9fcbbca3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/457ae07b6b6c43aa8d8b1c71109242bc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/e42225d60ec447118afbee16a149e5c8] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=36.1 K 2024-12-06T10:19:35,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f1ef3f0c801147969459c3eb9fcbbca3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733480371530 2024-12-06T10:19:35,530 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 457ae07b6b6c43aa8d8b1c71109242bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733480371648 2024-12-06T10:19:35,531 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting e42225d60ec447118afbee16a149e5c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733480372782 2024-12-06T10:19:35,538 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#584 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:35,538 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/30d7cb6499394cbc9cac4be95994f409 is 50, key is test_row_0/C:col10/1733480373438/Put/seqid=0 2024-12-06T10:19:35,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742517_1693 (size=12731) 2024-12-06T10:19:35,566 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/30d7cb6499394cbc9cac4be95994f409 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/30d7cb6499394cbc9cac4be95994f409 2024-12-06T10:19:35,571 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 30d7cb6499394cbc9cac4be95994f409(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
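Note: the compaction entries above show ExploringCompactionPolicy selecting all 3 eligible store files per store (B and A first, then C) for a minor compaction, with output throttled by PressureAwareThroughputController at 50.00 MB/second and a blocking-store-files limit of 16. Those numbers come from standard store-compaction settings; the snippet below lists the usual keys with HBase's default values purely for illustration (the class name is hypothetical, and the test may override some of these values).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio ExploringCompactionPolicy uses when deciding whether a file is close
    // enough in size to its neighbours to join the selection.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store-file count at which further writes to the store are blocked ("16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}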
2024-12-06T10:19:35,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:35,571 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=13, startTime=1733480375105; duration=0sec 2024-12-06T10:19:35,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:35,571 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C 2024-12-06T10:19:35,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:35,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480435592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480435593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480435597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480435597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480435597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:35,656 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e58bcfcce41f4114bcf73f56035a008d_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e58bcfcce41f4114bcf73f56035a008d_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:35,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/1873c7c0e73f46508be89edb54429db2, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:35,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/1873c7c0e73f46508be89edb54429db2 is 175, key is test_row_0/A:col10/1733480373452/Put/seqid=0 2024-12-06T10:19:35,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742518_1694 (size=31255) 2024-12-06T10:19:35,666 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=33.5 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/1873c7c0e73f46508be89edb54429db2 2024-12-06T10:19:35,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/f7f2acc3dbf14c6d9dd26a91b4bf1585 is 50, key is test_row_0/B:col10/1733480373452/Put/seqid=0 2024-12-06T10:19:35,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742519_1695 (size=12301) 2024-12-06T10:19:35,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480435698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480435700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480435701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480435701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480435702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:35,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480435901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480435904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480435904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480435904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:35,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480435907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:35,935 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/4c07ff91e8464650a9f5892771ecd0a1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/4c07ff91e8464650a9f5892771ecd0a1 2024-12-06T10:19:35,940 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 4c07ff91e8464650a9f5892771ecd0a1(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
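The repeated WARN/DEBUG pairs above record client mutations being rejected with RegionTooBusyException because the region's memstore has reached its 512.0 K blocking limit while the flush and compactions drain it; that limit presumably reflects the test's small hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal client-side sketch of coping with this pressure is shown below; it is illustrative only, the table, row and column names are taken from the log, and the retry and backoff values are arbitrary assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Let the client retry throttled operations a little longer than usual.
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 200); // ms between internal client retries
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Explicit application-level backoff on top of the client's own retries.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          // RegionTooBusyException, as logged above, is one of the causes the
          // client surfaces once its internal retries are exhausted.
          if (attempt >= 5) throw e;
          Thread.sleep(200L * attempt); // crude linear backoff while the flush drains the memstore
        }
      }
    }
  }
}

In practice the HBase client already retries RegionTooBusyException internally, so an explicit loop like this is only extra insurance during sustained memstore pressure such as the burst recorded here.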
2024-12-06T10:19:35,940 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:35,940 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=13, startTime=1733480375105; duration=0sec 2024-12-06T10:19:35,940 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:35,940 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:36,079 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/f7f2acc3dbf14c6d9dd26a91b4bf1585 2024-12-06T10:19:36,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/f44072578a6745c195655e3b536e49b5 is 50, key is test_row_0/C:col10/1733480373452/Put/seqid=0 2024-12-06T10:19:36,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742520_1696 (size=12301) 2024-12-06T10:19:36,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480436203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480436206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480436207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480436208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480436209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,489 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/f44072578a6745c195655e3b536e49b5 2024-12-06T10:19:36,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/1873c7c0e73f46508be89edb54429db2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2 2024-12-06T10:19:36,497 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2, entries=150, sequenceid=273, filesize=30.5 K 2024-12-06T10:19:36,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/f7f2acc3dbf14c6d9dd26a91b4bf1585 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f7f2acc3dbf14c6d9dd26a91b4bf1585 2024-12-06T10:19:36,501 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f7f2acc3dbf14c6d9dd26a91b4bf1585, entries=150, sequenceid=273, filesize=12.0 K 2024-12-06T10:19:36,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/f44072578a6745c195655e3b536e49b5 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f44072578a6745c195655e3b536e49b5 2024-12-06T10:19:36,509 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f44072578a6745c195655e3b536e49b5, entries=150, sequenceid=273, filesize=12.0 K 2024-12-06T10:19:36,510 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1269ms, sequenceid=273, compaction requested=false 2024-12-06T10:19:36,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:36,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
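The preceding entries show the requested flush completing: store files for A, B and C at sequenceid=273 are committed and roughly 100.63 KB of memstore data is written out in 1269 ms, after which the FlushRegionProcedure (pid=176) reports success. For comparison, a minimal sketch of issuing the same flush from a client via the Admin API; it is not taken from this run and assumes default connection settings.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush all memstores of TestAcidGuarantees; on this build the request is
      // carried out master-side as a FlushTableProcedure with per-region
      // FlushRegionProcedure children, like pid=175/176 in the entries around here.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}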
2024-12-06T10:19:36,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-06T10:19:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-06T10:19:36,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-06T10:19:36,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7940 sec 2024-12-06T10:19:36,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 2.7980 sec 2024-12-06T10:19:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:36,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-06T10:19:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:36,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480436721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480436721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480436722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480436722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480436724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065c9fa3a38fed4041a8a2a9e6d25bc178_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:36,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742521_1697 (size=12454) 2024-12-06T10:19:36,740 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:36,744 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065c9fa3a38fed4041a8a2a9e6d25bc178_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065c9fa3a38fed4041a8a2a9e6d25bc178_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:36,745 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bfb25a0119264023a81da272038504fd, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:36,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bfb25a0119264023a81da272038504fd is 175, key is test_row_0/A:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:36,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742522_1698 (size=31255) 2024-12-06T10:19:36,751 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bfb25a0119264023a81da272038504fd 2024-12-06T10:19:36,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/54e9d59d842e424f8264da2db5d7cb1c is 50, key is test_row_0/B:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:36,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742523_1699 (size=12301) 2024-12-06T10:19:36,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480436826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480436826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480436827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480436827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:36,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480436827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480437029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480437030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480437030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480437030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480437031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/54e9d59d842e424f8264da2db5d7cb1c 2024-12-06T10:19:37,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/dcc624091f954ab9b8fcac8a4930b39a is 50, key is test_row_0/C:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:37,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742524_1700 (size=12301) 2024-12-06T10:19:37,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/dcc624091f954ab9b8fcac8a4930b39a 2024-12-06T10:19:37,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bfb25a0119264023a81da272038504fd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd 2024-12-06T10:19:37,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd, entries=150, sequenceid=295, filesize=30.5 K 2024-12-06T10:19:37,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/54e9d59d842e424f8264da2db5d7cb1c as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/54e9d59d842e424f8264da2db5d7cb1c 2024-12-06T10:19:37,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/54e9d59d842e424f8264da2db5d7cb1c, entries=150, sequenceid=295, filesize=12.0 K 2024-12-06T10:19:37,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/dcc624091f954ab9b8fcac8a4930b39a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/dcc624091f954ab9b8fcac8a4930b39a 2024-12-06T10:19:37,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/dcc624091f954ab9b8fcac8a4930b39a, entries=150, sequenceid=295, filesize=12.0 K 2024-12-06T10:19:37,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 488ms, sequenceid=295, compaction requested=true 2024-12-06T10:19:37,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:37,199 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:37,199 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:37,200 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:37,200 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:37,200 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:37,200 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:37,200 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:37,200 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:37,200 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/4c07ff91e8464650a9f5892771ecd0a1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=92.0 K 2024-12-06T10:19:37,200 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/983efaefac5d4a3c8af447dc19681c1c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f7f2acc3dbf14c6d9dd26a91b4bf1585, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/54e9d59d842e424f8264da2db5d7cb1c] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=36.5 K 2024-12-06T10:19:37,200 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:37,200 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/4c07ff91e8464650a9f5892771ecd0a1, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd] 2024-12-06T10:19:37,200 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 983efaefac5d4a3c8af447dc19681c1c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733480372782 2024-12-06T10:19:37,201 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c07ff91e8464650a9f5892771ecd0a1, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733480372782 2024-12-06T10:19:37,201 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f7f2acc3dbf14c6d9dd26a91b4bf1585, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733480373442 2024-12-06T10:19:37,201 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1873c7c0e73f46508be89edb54429db2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733480373442 2024-12-06T10:19:37,201 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 54e9d59d842e424f8264da2db5d7cb1c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733480375590 2024-12-06T10:19:37,201 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfb25a0119264023a81da272038504fd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733480375590 2024-12-06T10:19:37,207 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:37,208 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#590 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:37,208 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7dfaa15b15e24fc6bba1c371ab66c237 is 50, key is test_row_0/B:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:37,209 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206000e5224ceba4b3ba6cab50310ccfd8b_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:37,210 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206000e5224ceba4b3ba6cab50310ccfd8b_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:37,211 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206000e5224ceba4b3ba6cab50310ccfd8b_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:37,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742525_1701 (size=12983) 2024-12-06T10:19:37,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742526_1702 (size=4469) 2024-12-06T10:19:37,222 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#591 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:37,222 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7dfaa15b15e24fc6bba1c371ab66c237 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7dfaa15b15e24fc6bba1c371ab66c237 2024-12-06T10:19:37,223 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/987569584db54786810eb6f54c7c7fc2 is 175, key is test_row_0/A:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:37,226 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 7dfaa15b15e24fc6bba1c371ab66c237(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:37,227 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:37,227 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=13, startTime=1733480377199; duration=0sec 2024-12-06T10:19:37,227 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:37,227 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:37,227 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:19:37,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742527_1703 (size=31937) 2024-12-06T10:19:37,228 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:19:37,228 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:37,228 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:37,228 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/30d7cb6499394cbc9cac4be95994f409, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f44072578a6745c195655e3b536e49b5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/dcc624091f954ab9b8fcac8a4930b39a] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=36.5 K 2024-12-06T10:19:37,228 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 30d7cb6499394cbc9cac4be95994f409, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733480372782 2024-12-06T10:19:37,229 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f44072578a6745c195655e3b536e49b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733480373442 2024-12-06T10:19:37,229 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting dcc624091f954ab9b8fcac8a4930b39a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733480375590 2024-12-06T10:19:37,240 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/987569584db54786810eb6f54c7c7fc2 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/987569584db54786810eb6f54c7c7fc2 2024-12-06T10:19:37,241 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#592 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:37,241 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4bfeab0051d74a88a647cf8469ab02a6 is 50, key is test_row_0/C:col10/1733480376708/Put/seqid=0 2024-12-06T10:19:37,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742528_1704 (size=12983) 2024-12-06T10:19:37,245 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 987569584db54786810eb6f54c7c7fc2(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:37,245 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:37,245 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=13, startTime=1733480377199; duration=0sec 2024-12-06T10:19:37,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:37,246 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:37,249 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4bfeab0051d74a88a647cf8469ab02a6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4bfeab0051d74a88a647cf8469ab02a6 2024-12-06T10:19:37,253 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 4bfeab0051d74a88a647cf8469ab02a6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:37,253 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:37,253 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=13, startTime=1733480377199; duration=0sec 2024-12-06T10:19:37,253 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:37,253 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C 2024-12-06T10:19:37,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:37,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-06T10:19:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-06T10:19:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:37,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120647f6fc51f8b14d95bf5452b9e925ca83_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480377335/Put/seqid=0 2024-12-06T10:19:37,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742529_1705 (size=12454) 2024-12-06T10:19:37,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480437344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480437346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480437348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480437349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480437349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,352 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:37,355 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120647f6fc51f8b14d95bf5452b9e925ca83_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120647f6fc51f8b14d95bf5452b9e925ca83_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:37,356 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/6d9ce4be09c64cd18915d448a21fdcb7, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:37,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/6d9ce4be09c64cd18915d448a21fdcb7 is 175, key is test_row_0/A:col10/1733480377335/Put/seqid=0 2024-12-06T10:19:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742530_1706 (size=31255) 2024-12-06T10:19:37,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480437450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480437450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480437450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480437453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480437453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480437653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480437653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480437655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480437656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480437656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,760 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/6d9ce4be09c64cd18915d448a21fdcb7 2024-12-06T10:19:37,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7779e1792bc4414b819b79931c96f1be is 50, key is test_row_0/B:col10/1733480377335/Put/seqid=0 2024-12-06T10:19:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742531_1707 (size=12301) 2024-12-06T10:19:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T10:19:37,821 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-06T10:19:37,822 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-06T10:19:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T10:19:37,823 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:37,823 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:37,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T10:19:37,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480437955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480437955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480437957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480437959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480437961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,975 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:37,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T10:19:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:37,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:37,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T10:19:38,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T10:19:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:38,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7779e1792bc4414b819b79931c96f1be 2024-12-06T10:19:38,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/292dd1fb3d5d47feb7269c0039520212 is 50, key is test_row_0/C:col10/1733480377335/Put/seqid=0 2024-12-06T10:19:38,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742532_1708 (size=12301) 2024-12-06T10:19:38,280 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T10:19:38,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:38,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:38,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T10:19:38,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T10:19:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480438459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480438460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480438462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480438462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:38,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480438463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/292dd1fb3d5d47feb7269c0039520212 2024-12-06T10:19:38,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/6d9ce4be09c64cd18915d448a21fdcb7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7 2024-12-06T10:19:38,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T10:19:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:38,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7, entries=150, sequenceid=318, filesize=30.5 K 2024-12-06T10:19:38,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/7779e1792bc4414b819b79931c96f1be as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7779e1792bc4414b819b79931c96f1be 2024-12-06T10:19:38,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7779e1792bc4414b819b79931c96f1be, entries=150, sequenceid=318, filesize=12.0 K 2024-12-06T10:19:38,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/292dd1fb3d5d47feb7269c0039520212 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/292dd1fb3d5d47feb7269c0039520212 2024-12-06T10:19:38,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/292dd1fb3d5d47feb7269c0039520212, entries=150, sequenceid=318, filesize=12.0 K 
2024-12-06T10:19:38,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1262ms, sequenceid=318, compaction requested=false 2024-12-06T10:19:38,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:38,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:38,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:38,738 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:38,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206570fe4e2682141f68e3e066ffc1f66c2_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480377342/Put/seqid=0 2024-12-06T10:19:38,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742533_1709 (size=12454) 2024-12-06T10:19:38,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T10:19:39,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:39,154 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206570fe4e2682141f68e3e066ffc1f66c2_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206570fe4e2682141f68e3e066ffc1f66c2_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:39,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/e48fa0cb80214cc69a85c75f1c7451dd, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:39,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/e48fa0cb80214cc69a85c75f1c7451dd is 175, key is test_row_0/A:col10/1733480377342/Put/seqid=0 2024-12-06T10:19:39,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742534_1710 (size=31255) 2024-12-06T10:19:39,160 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/e48fa0cb80214cc69a85c75f1c7451dd 2024-12-06T10:19:39,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/b6fecdf8708d4593827e02d3044c1fa8 is 50, key is test_row_0/B:col10/1733480377342/Put/seqid=0 2024-12-06T10:19:39,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742535_1711 (size=12301) 2024-12-06T10:19:39,170 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/b6fecdf8708d4593827e02d3044c1fa8 2024-12-06T10:19:39,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4cdb39fab76546dfa1fe834817772248 is 50, key is test_row_0/C:col10/1733480377342/Put/seqid=0 2024-12-06T10:19:39,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742536_1712 (size=12301) 2024-12-06T10:19:39,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:39,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:39,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480439479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480439480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480439482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480439483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480439483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,582 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4cdb39fab76546dfa1fe834817772248 2024-12-06T10:19:39,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480439584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480439585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/e48fa0cb80214cc69a85c75f1c7451dd as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd 2024-12-06T10:19:39,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480439586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480439587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480439588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,590 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd, entries=150, sequenceid=335, filesize=30.5 K 2024-12-06T10:19:39,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/b6fecdf8708d4593827e02d3044c1fa8 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b6fecdf8708d4593827e02d3044c1fa8 2024-12-06T10:19:39,593 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b6fecdf8708d4593827e02d3044c1fa8, entries=150, sequenceid=335, filesize=12.0 K 2024-12-06T10:19:39,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/4cdb39fab76546dfa1fe834817772248 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4cdb39fab76546dfa1fe834817772248 2024-12-06T10:19:39,597 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4cdb39fab76546dfa1fe834817772248, entries=150, sequenceid=335, filesize=12.0 K 2024-12-06T10:19:39,598 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 860ms, sequenceid=335, compaction requested=true 2024-12-06T10:19:39,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:39,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:39,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-06T10:19:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-06T10:19:39,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-06T10:19:39,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7750 sec 2024-12-06T10:19:39,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.7780 sec 2024-12-06T10:19:39,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:39,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T10:19:39,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:39,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:39,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:39,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:39,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:39,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:39,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120691079ae49c95431e92d527c52224e5cc_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480379789/Put/seqid=0 
2024-12-06T10:19:39,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742537_1713 (size=12454) 2024-12-06T10:19:39,804 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:39,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480439799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480439799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480439802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480439802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480439803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,807 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120691079ae49c95431e92d527c52224e5cc_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120691079ae49c95431e92d527c52224e5cc_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:39,808 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/2e48ae9ad14e49d99812aa2aea2005e9, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:39,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/2e48ae9ad14e49d99812aa2aea2005e9 is 175, key is test_row_0/A:col10/1733480379789/Put/seqid=0 2024-12-06T10:19:39,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742538_1714 (size=31255) 2024-12-06T10:19:39,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480439905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480439905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480439907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480439907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:39,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480439907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:39,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T10:19:39,926 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-06T10:19:39,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T10:19:39,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-12-06T10:19:39,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:39,929 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:19:39,930 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:19:39,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:19:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:40,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480440107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480440109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480440110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480440110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480440111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,213 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=357, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/2e48ae9ad14e49d99812aa2aea2005e9 2024-12-06T10:19:40,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:40,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/f4ff39b1516e42a5948f9048efd65725 is 50, key is test_row_0/B:col10/1733480379789/Put/seqid=0 2024-12-06T10:19:40,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:40,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:40,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742539_1715 (size=12301) 2024-12-06T10:19:40,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/f4ff39b1516e42a5948f9048efd65725 2024-12-06T10:19:40,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/9c5b49e8720e4c27b484d47bcfb7fb9b is 50, key is test_row_0/C:col10/1733480379789/Put/seqid=0 2024-12-06T10:19:40,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742540_1716 (size=12301) 2024-12-06T10:19:40,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/9c5b49e8720e4c27b484d47bcfb7fb9b 2024-12-06T10:19:40,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/2e48ae9ad14e49d99812aa2aea2005e9 as 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9 2024-12-06T10:19:40,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9, entries=150, sequenceid=357, filesize=30.5 K 2024-12-06T10:19:40,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/f4ff39b1516e42a5948f9048efd65725 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f4ff39b1516e42a5948f9048efd65725 2024-12-06T10:19:40,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:40,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:40,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:40,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f4ff39b1516e42a5948f9048efd65725, entries=150, sequenceid=357, filesize=12.0 K 2024-12-06T10:19:40,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/9c5b49e8720e4c27b484d47bcfb7fb9b as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9c5b49e8720e4c27b484d47bcfb7fb9b 2024-12-06T10:19:40,403 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9c5b49e8720e4c27b484d47bcfb7fb9b, entries=150, sequenceid=357, filesize=12.0 K 2024-12-06T10:19:40,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 615ms, sequenceid=357, compaction requested=true 2024-12-06T10:19:40,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:40,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:19:40,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:40,404 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:40,404 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:40,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:19:40,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:40,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4c4e64c91c50413b4c0cd97a01bcb8:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T10:19:40,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:40,406 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:40,406 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/B is initiating minor compaction (all files) 2024-12-06T10:19:40,406 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/B in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,406 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7dfaa15b15e24fc6bba1c371ab66c237, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7779e1792bc4414b819b79931c96f1be, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b6fecdf8708d4593827e02d3044c1fa8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f4ff39b1516e42a5948f9048efd65725] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=48.7 K 2024-12-06T10:19:40,406 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:40,406 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dfaa15b15e24fc6bba1c371ab66c237, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733480375590 2024-12-06T10:19:40,406 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/A is initiating minor compaction (all files) 2024-12-06T10:19:40,407 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/A in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:40,407 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/987569584db54786810eb6f54c7c7fc2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=122.8 K 2024-12-06T10:19:40,407 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,407 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 7779e1792bc4414b819b79931c96f1be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480377333 2024-12-06T10:19:40,407 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
files: [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/987569584db54786810eb6f54c7c7fc2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9] 2024-12-06T10:19:40,407 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 987569584db54786810eb6f54c7c7fc2, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733480375590 2024-12-06T10:19:40,407 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting b6fecdf8708d4593827e02d3044c1fa8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733480377342 2024-12-06T10:19:40,407 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting f4ff39b1516e42a5948f9048efd65725, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733480379478 2024-12-06T10:19:40,407 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d9ce4be09c64cd18915d448a21fdcb7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480377333 2024-12-06T10:19:40,408 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting e48fa0cb80214cc69a85c75f1c7451dd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733480377342 2024-12-06T10:19:40,408 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e48ae9ad14e49d99812aa2aea2005e9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733480379478 2024-12-06T10:19:40,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:40,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T10:19:40,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:40,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:40,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:40,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:40,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:40,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-12-06T10:19:40,416 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:40,420 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120641aa4ac7d60c4f25923adb4ca9f93834_7b4c4e64c91c50413b4c0cd97a01bcb8 store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:40,421 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#B#compaction#603 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:40,421 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/5c364b56f3fa4023867ed169054b7504 is 50, key is test_row_0/B:col10/1733480379789/Put/seqid=0 2024-12-06T10:19:40,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120678a136f8a2b54321840ce72ba4162ad9_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480379799/Put/seqid=0 2024-12-06T10:19:40,423 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120641aa4ac7d60c4f25923adb4ca9f93834_7b4c4e64c91c50413b4c0cd97a01bcb8, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:40,423 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120641aa4ac7d60c4f25923adb4ca9f93834_7b4c4e64c91c50413b4c0cd97a01bcb8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:40,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742542_1718 (size=12454) 2024-12-06T10:19:40,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742541_1717 (size=13119) 2024-12-06T10:19:40,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742543_1719 (size=4469) 2024-12-06T10:19:40,436 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/5c364b56f3fa4023867ed169054b7504 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5c364b56f3fa4023867ed169054b7504 
2024-12-06T10:19:40,441 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/B of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 5c364b56f3fa4023867ed169054b7504(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:40,441 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:40,441 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/B, priority=12, startTime=1733480380404; duration=0sec 2024-12-06T10:19:40,441 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T10:19:40,441 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:B 2024-12-06T10:19:40,441 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T10:19:40,442 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T10:19:40,443 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1540): 7b4c4e64c91c50413b4c0cd97a01bcb8/C is initiating minor compaction (all files) 2024-12-06T10:19:40,443 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b4c4e64c91c50413b4c0cd97a01bcb8/C in TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:40,443 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4bfeab0051d74a88a647cf8469ab02a6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/292dd1fb3d5d47feb7269c0039520212, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4cdb39fab76546dfa1fe834817772248, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9c5b49e8720e4c27b484d47bcfb7fb9b] into tmpdir=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp, totalSize=48.7 K 2024-12-06T10:19:40,443 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bfeab0051d74a88a647cf8469ab02a6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733480375590 2024-12-06T10:19:40,448 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 292dd1fb3d5d47feb7269c0039520212, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733480377333 2024-12-06T10:19:40,449 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cdb39fab76546dfa1fe834817772248, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733480377342 2024-12-06T10:19:40,456 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c5b49e8720e4c27b484d47bcfb7fb9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733480379478 2024-12-06T10:19:40,463 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#C#compaction#605 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:40,463 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/088fca46a16d4d95bf255819b8d532d1 is 50, key is test_row_0/C:col10/1733480379789/Put/seqid=0 2024-12-06T10:19:40,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742544_1720 (size=13119) 2024-12-06T10:19:40,473 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/088fca46a16d4d95bf255819b8d532d1 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/088fca46a16d4d95bf255819b8d532d1 2024-12-06T10:19:40,480 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/C of 7b4c4e64c91c50413b4c0cd97a01bcb8 into 088fca46a16d4d95bf255819b8d532d1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:19:40,480 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:40,480 INFO [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/C, priority=12, startTime=1733480380405; duration=0sec 2024-12-06T10:19:40,480 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:40,480 DEBUG [RS:0;552d6a33fa09:33397-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:C 2024-12-06T10:19:40,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480440440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480440444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480440481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480440481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480440481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:40,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:40,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:40,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480440582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480440586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480440586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480440586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480440587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,694 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:40,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480440785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480440789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480440790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480440790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:40,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480440791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,830 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:40,833 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120678a136f8a2b54321840ce72ba4162ad9_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120678a136f8a2b54321840ce72ba4162ad9_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:40,834 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4c4e64c91c50413b4c0cd97a01bcb8#A#compaction#602 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:19:40,834 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bb6d7d6c8b01427db7b7061efe33dc00, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:40,834 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/c75bc4adc075431186a7ce8924002a7a is 175, key is test_row_0/A:col10/1733480379789/Put/seqid=0 2024-12-06T10:19:40,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bb6d7d6c8b01427db7b7061efe33dc00 is 175, key is test_row_0/A:col10/1733480379799/Put/seqid=0 2024-12-06T10:19:40,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742545_1721 (size=32073) 2024-12-06T10:19:40,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742546_1722 (size=31255) 2024-12-06T10:19:40,843 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/c75bc4adc075431186a7ce8924002a7a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c75bc4adc075431186a7ce8924002a7a 2024-12-06T10:19:40,846 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b4c4e64c91c50413b4c0cd97a01bcb8/A of 7b4c4e64c91c50413b4c0cd97a01bcb8 into c75bc4adc075431186a7ce8924002a7a(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:19:40,846 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:40,846 INFO [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8., storeName=7b4c4e64c91c50413b4c0cd97a01bcb8/A, priority=12, startTime=1733480380404; duration=0sec 2024-12-06T10:19:40,846 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:19:40,846 DEBUG [RS:0;552d6a33fa09:33397-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4c4e64c91c50413b4c0cd97a01bcb8:A 2024-12-06T10:19:40,847 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:40,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:40,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:40,999 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
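Right after this, client Mutate calls against the region start being pushed back with RegionTooBusyException ("Over memstore limit=512.0 K"): while the flush is still in progress, the region's memstore has grown past its blocking limit, so new puts are rejected until memory is freed. In stock HBase that blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only illustrates that arithmetic with made-up values that happen to reproduce the 512 KB figure; it does not show the configuration this test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch of how a 512 KB per-region blocking limit can arise.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: 128 KB flush size * multiplier 4 = 512 KB,
    // matching "Over memstore limit=512.0 K" in the surrounding log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Puts fail with RegionTooBusyException once a region's memstore exceeds "
        + blockingLimit + " bytes (" + (blockingLimit / 1024) + " KB)");
  }
}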
2024-12-06T10:19:41,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:41,080 DEBUG [Thread-2730 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:61610 2024-12-06T10:19:41,080 DEBUG [Thread-2730 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:41,081 DEBUG [Thread-2734 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:61610 2024-12-06T10:19:41,081 DEBUG [Thread-2734 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:41,081 DEBUG [Thread-2728 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:61610 2024-12-06T10:19:41,081 DEBUG [Thread-2728 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:41,083 DEBUG [Thread-2726 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e757135 to 127.0.0.1:61610 2024-12-06T10:19:41,083 DEBUG [Thread-2726 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:41,084 DEBUG [Thread-2732 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:61610 2024-12-06T10:19:41,084 DEBUG [Thread-2732 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:41,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480441090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480441092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480441093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480441093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480441095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,152 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,239 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bb6d7d6c8b01427db7b7061efe33dc00 2024-12-06T10:19:41,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/d3e317fd0d734c61bdb1582c6d0d4470 is 50, key is test_row_0/B:col10/1733480379799/Put/seqid=0 2024-12-06T10:19:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742547_1723 (size=12301) 2024-12-06T10:19:41,304 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:41,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,456 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:41,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57012 deadline: 1733480441596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57006 deadline: 1733480441596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56996 deadline: 1733480441597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56948 deadline: 1733480441597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:19:41,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57004 deadline: 1733480441598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:41,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/d3e317fd0d734c61bdb1582c6d0d4470 2024-12-06T10:19:41,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/ba060d0e0adc49d0aeb8d365f1a72f06 is 50, key is test_row_0/C:col10/1733480379799/Put/seqid=0 2024-12-06T10:19:41,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742548_1724 (size=12301) 2024-12-06T10:19:41,761 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
as already flushing 2024-12-06T10:19:41,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:41,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:41,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:41,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:42,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/ba060d0e0adc49d0aeb8d365f1a72f06 2024-12-06T10:19:42,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/bb6d7d6c8b01427db7b7061efe33dc00 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bb6d7d6c8b01427db7b7061efe33dc00 2024-12-06T10:19:42,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bb6d7d6c8b01427db7b7061efe33dc00, entries=150, sequenceid=372, filesize=30.5 K 2024-12-06T10:19:42,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/d3e317fd0d734c61bdb1582c6d0d4470 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d3e317fd0d734c61bdb1582c6d0d4470 2024-12-06T10:19:42,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d3e317fd0d734c61bdb1582c6d0d4470, entries=150, sequenceid=372, filesize=12.0 K 2024-12-06T10:19:42,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/ba060d0e0adc49d0aeb8d365f1a72f06 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ba060d0e0adc49d0aeb8d365f1a72f06 2024-12-06T10:19:42,065 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:42,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:42,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:19:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:19:42,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ba060d0e0adc49d0aeb8d365f1a72f06, entries=150, sequenceid=372, filesize=12.0 K 2024-12-06T10:19:42,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1652ms, sequenceid=372, compaction requested=false 2024-12-06T10:19:42,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:42,217 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:42,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33397 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:42,218 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:42,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c4329ea499b54a14b339c7d5f7e6911c_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480380443/Put/seqid=0 2024-12-06T10:19:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742549_1725 (size=12454) 2024-12-06T10:19:42,601 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33397 {}] regionserver.HRegion(8581): Flush requested on 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:42,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. as already flushing 2024-12-06T10:19:42,601 DEBUG [Thread-2721 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:61610 2024-12-06T10:19:42,601 DEBUG [Thread-2721 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:42,601 DEBUG [Thread-2719 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:61610 2024-12-06T10:19:42,601 DEBUG [Thread-2719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:42,605 DEBUG [Thread-2715 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x537a66f8 to 127.0.0.1:61610 2024-12-06T10:19:42,605 DEBUG [Thread-2715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:42,606 DEBUG [Thread-2723 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:61610 2024-12-06T10:19:42,606 DEBUG [Thread-2723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:42,607 DEBUG [Thread-2717 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:61610 2024-12-06T10:19:42,607 DEBUG [Thread-2717 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:42,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:42,630 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c4329ea499b54a14b339c7d5f7e6911c_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c4329ea499b54a14b339c7d5f7e6911c_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:42,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cb1ade7410f14da19b1620dcf888b98d, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:42,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cb1ade7410f14da19b1620dcf888b98d is 175, key is test_row_0/A:col10/1733480380443/Put/seqid=0 2024-12-06T10:19:42,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742550_1726 (size=31255) 2024-12-06T10:19:43,037 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 
{event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cb1ade7410f14da19b1620dcf888b98d 2024-12-06T10:19:43,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/d229e8c305164f0e8f3f26f41e9b2cbf is 50, key is test_row_0/B:col10/1733480380443/Put/seqid=0 2024-12-06T10:19:43,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742551_1727 (size=12301) 2024-12-06T10:19:43,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:19:43,446 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/d229e8c305164f0e8f3f26f41e9b2cbf 2024-12-06T10:19:43,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/5113abea86ac43349ddc7bb4c609910a is 50, key is test_row_0/C:col10/1733480380443/Put/seqid=0 2024-12-06T10:19:43,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742552_1728 (size=12301) 2024-12-06T10:19:43,455 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/5113abea86ac43349ddc7bb4c609910a 2024-12-06T10:19:43,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/cb1ade7410f14da19b1620dcf888b98d as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cb1ade7410f14da19b1620dcf888b98d 2024-12-06T10:19:43,459 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cb1ade7410f14da19b1620dcf888b98d, entries=150, 
sequenceid=397, filesize=30.5 K 2024-12-06T10:19:43,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/d229e8c305164f0e8f3f26f41e9b2cbf as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d229e8c305164f0e8f3f26f41e9b2cbf 2024-12-06T10:19:43,462 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d229e8c305164f0e8f3f26f41e9b2cbf, entries=150, sequenceid=397, filesize=12.0 K 2024-12-06T10:19:43,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/5113abea86ac43349ddc7bb4c609910a as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5113abea86ac43349ddc7bb4c609910a 2024-12-06T10:19:43,464 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5113abea86ac43349ddc7bb4c609910a, entries=150, sequenceid=397, filesize=12.0 K 2024-12-06T10:19:43,465 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=33.54 KB/34350 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1247ms, sequenceid=397, compaction requested=true 2024-12-06T10:19:43,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:43,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:43,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-12-06T10:19:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-12-06T10:19:43,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-06T10:19:43,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5360 sec 2024-12-06T10:19:43,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 3.5400 sec 2024-12-06T10:19:44,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T10:19:44,034 INFO [Thread-2725 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5821 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5851 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5681 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5802 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5843 2024-12-06T10:19:44,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T10:19:44,034 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:19:44,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2209c520 to 127.0.0.1:61610 2024-12-06T10:19:44,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:44,035 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T10:19:44,035 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T10:19:44,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:44,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=181 2024-12-06T10:19:44,037 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480384037"}]},"ts":"1733480384037"} 2024-12-06T10:19:44,038 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T10:19:44,040 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T10:19:44,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T10:19:44,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, UNASSIGN}] 2024-12-06T10:19:44,042 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, UNASSIGN 2024-12-06T10:19:44,042 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=CLOSING, regionLocation=552d6a33fa09,33397,1733480204743 2024-12-06T10:19:44,043 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T10:19:44,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; CloseRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743}] 2024-12-06T10:19:44,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-06T10:19:44,194 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:44,194 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(124): Close 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:44,194 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T10:19:44,194 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1681): Closing 7b4c4e64c91c50413b4c0cd97a01bcb8, disabling compactions & flushes 2024-12-06T10:19:44,195 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 
2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. after waiting 0 ms 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:44,195 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(2837): Flushing 7b4c4e64c91c50413b4c0cd97a01bcb8 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=A 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=B 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b4c4e64c91c50413b4c0cd97a01bcb8, store=C 2024-12-06T10:19:44,195 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T10:19:44,199 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206453a5aae8c7a401182c0b2388dea3b69_7b4c4e64c91c50413b4c0cd97a01bcb8 is 50, key is test_row_0/A:col10/1733480382606/Put/seqid=0 2024-12-06T10:19:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742553_1729 (size=9914) 2024-12-06T10:19:44,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-06T10:19:44,603 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:19:44,605 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206453a5aae8c7a401182c0b2388dea3b69_7b4c4e64c91c50413b4c0cd97a01bcb8 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206453a5aae8c7a401182c0b2388dea3b69_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:44,606 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/ce3bb0a022a949f7ac266150d5224054, store: [table=TestAcidGuarantees family=A region=7b4c4e64c91c50413b4c0cd97a01bcb8] 2024-12-06T10:19:44,607 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/ce3bb0a022a949f7ac266150d5224054 is 175, key is test_row_0/A:col10/1733480382606/Put/seqid=0 2024-12-06T10:19:44,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742554_1730 (size=22561) 2024-12-06T10:19:44,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-06T10:19:45,010 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=405, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/ce3bb0a022a949f7ac266150d5224054 2024-12-06T10:19:45,015 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/de189b5dbbab4133ad414b42a408a4d0 is 50, key is test_row_0/B:col10/1733480382606/Put/seqid=0 2024-12-06T10:19:45,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742555_1731 (size=9857) 2024-12-06T10:19:45,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-06T10:19:45,419 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/de189b5dbbab4133ad414b42a408a4d0 2024-12-06T10:19:45,424 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/ac0bd6b6a8004a43ba4003ab9c33c507 is 50, key is test_row_0/C:col10/1733480382606/Put/seqid=0 2024-12-06T10:19:45,426 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742556_1732 (size=9857) 2024-12-06T10:19:45,427 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/ac0bd6b6a8004a43ba4003ab9c33c507 2024-12-06T10:19:45,430 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/A/ce3bb0a022a949f7ac266150d5224054 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/ce3bb0a022a949f7ac266150d5224054 2024-12-06T10:19:45,432 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/ce3bb0a022a949f7ac266150d5224054, entries=100, sequenceid=405, filesize=22.0 K 2024-12-06T10:19:45,432 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/B/de189b5dbbab4133ad414b42a408a4d0 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de189b5dbbab4133ad414b42a408a4d0 2024-12-06T10:19:45,435 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de189b5dbbab4133ad414b42a408a4d0, entries=100, sequenceid=405, filesize=9.6 K 2024-12-06T10:19:45,435 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/.tmp/C/ac0bd6b6a8004a43ba4003ab9c33c507 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ac0bd6b6a8004a43ba4003ab9c33c507 2024-12-06T10:19:45,437 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ac0bd6b6a8004a43ba4003ab9c33c507, entries=100, sequenceid=405, filesize=9.6 K 2024-12-06T10:19:45,438 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, 
heapSize ~88.59 KB/90720, currentSize=0 B/0 for 7b4c4e64c91c50413b4c0cd97a01bcb8 in 1243ms, sequenceid=405, compaction requested=true 2024-12-06T10:19:45,438 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9401937f16034e12a4848ce8547897a2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a810c086858942feb968bad54b0f8fbe, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e56e88d4849a446b93e0d1891c9826e8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a9582847497a4644afa56a1c46390610, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/4c07ff91e8464650a9f5892771ecd0a1, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/987569584db54786810eb6f54c7c7fc2, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9] to archive 2024-12-06T10:19:45,439 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:19:45,440 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/f1b5dbfc6b944a43a26c5c8b5e6d5632 2024-12-06T10:19:45,441 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9246c144fff44629bafdda6a7387eef4 2024-12-06T10:19:45,442 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/23ef566befe84409b1bdb3489884c46d 2024-12-06T10:19:45,442 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9401937f16034e12a4848ce8547897a2 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/9401937f16034e12a4848ce8547897a2 2024-12-06T10:19:45,443 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/7310a28789df41299915f976d0a4c078 2024-12-06T10:19:45,444 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c1888d4732ea4881bcbed55ff89cb1ec 2024-12-06T10:19:45,444 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a810c086858942feb968bad54b0f8fbe to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a810c086858942feb968bad54b0f8fbe 2024-12-06T10:19:45,445 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bf57646105824ae1be2c6a682a0918b4 2024-12-06T10:19:45,446 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/06d49e9a567e400cb249e98440c7f424 2024-12-06T10:19:45,447 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cf86d8f05b4f48a898287bd08367c2c5 2024-12-06T10:19:45,447 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bdddaaf26fc6429d87c4f91d6a43a28c 2024-12-06T10:19:45,448 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e56e88d4849a446b93e0d1891c9826e8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e56e88d4849a446b93e0d1891c9826e8 2024-12-06T10:19:45,449 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/45c5720d7e9e4bca974cb5892f5c5a26 2024-12-06T10:19:45,449 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/3780fdb5d5294bbebeb7ffeca248c8d9 2024-12-06T10:19:45,450 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a9582847497a4644afa56a1c46390610 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/a9582847497a4644afa56a1c46390610 2024-12-06T10:19:45,451 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/04d7ff1a217a4c40ae67cda6ecf35f63 2024-12-06T10:19:45,451 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/4c07ff91e8464650a9f5892771ecd0a1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/4c07ff91e8464650a9f5892771ecd0a1 2024-12-06T10:19:45,452 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/90e355b49e304873a4b47aa8c18f8724 2024-12-06T10:19:45,453 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/1873c7c0e73f46508be89edb54429db2 2024-12-06T10:19:45,453 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/987569584db54786810eb6f54c7c7fc2 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/987569584db54786810eb6f54c7c7fc2 2024-12-06T10:19:45,454 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bfb25a0119264023a81da272038504fd 2024-12-06T10:19:45,455 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/6d9ce4be09c64cd18915d448a21fdcb7 2024-12-06T10:19:45,455 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/e48fa0cb80214cc69a85c75f1c7451dd 2024-12-06T10:19:45,456 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/2e48ae9ad14e49d99812aa2aea2005e9 2024-12-06T10:19:45,457 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/965fbea9298740cc869b9a4b02d4a093, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/74ff1341db0640b38fbace278747a395, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/2acf90690380429ca7d7a68e703e1dd7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/318db86340304353b65bb18350242ce7, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fce1ff2e5f4845d4b5c32a745525eab9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/4f0a94191a8e4f609bcb61b4bb0a196f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/580a7d4d87f24596a104af357208de64, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/ae007bc2044b4754aa8a9b81cab4b26d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de9ce77002464b3e9c6b336f77ec1476, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/bc8e69329e06421d9b3df9c5c67a61ab, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5df2647ef06344fbbea96e1bbbbd4db9, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/0307124cc9eb4fe2828d172194dd5c7e, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/03788a449d14443fbb68dfd672955f1d, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fe7ea649e15b47b49d421cd56bd28eda, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b752fe9f0506465d86ff8015d180eca4, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/3706886ff78d4d46b5ee9ac3923e3806, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/983efaefac5d4a3c8af447dc19681c1c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7b8c40d6a1904399a6e16fe07c23133f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f7f2acc3dbf14c6d9dd26a91b4bf1585, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7dfaa15b15e24fc6bba1c371ab66c237, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/54e9d59d842e424f8264da2db5d7cb1c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7779e1792bc4414b819b79931c96f1be, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b6fecdf8708d4593827e02d3044c1fa8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f4ff39b1516e42a5948f9048efd65725] to archive 2024-12-06T10:19:45,458 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T10:19:45,459 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/965fbea9298740cc869b9a4b02d4a093 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/965fbea9298740cc869b9a4b02d4a093 2024-12-06T10:19:45,460 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/74ff1341db0640b38fbace278747a395 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/74ff1341db0640b38fbace278747a395 2024-12-06T10:19:45,461 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/2acf90690380429ca7d7a68e703e1dd7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/2acf90690380429ca7d7a68e703e1dd7 2024-12-06T10:19:45,461 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/318db86340304353b65bb18350242ce7 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/318db86340304353b65bb18350242ce7 2024-12-06T10:19:45,462 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fce1ff2e5f4845d4b5c32a745525eab9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fce1ff2e5f4845d4b5c32a745525eab9 2024-12-06T10:19:45,463 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/4f0a94191a8e4f609bcb61b4bb0a196f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/4f0a94191a8e4f609bcb61b4bb0a196f 2024-12-06T10:19:45,464 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/580a7d4d87f24596a104af357208de64 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/580a7d4d87f24596a104af357208de64 2024-12-06T10:19:45,465 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/ae007bc2044b4754aa8a9b81cab4b26d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/ae007bc2044b4754aa8a9b81cab4b26d 2024-12-06T10:19:45,466 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de9ce77002464b3e9c6b336f77ec1476 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de9ce77002464b3e9c6b336f77ec1476 2024-12-06T10:19:45,466 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/bc8e69329e06421d9b3df9c5c67a61ab to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/bc8e69329e06421d9b3df9c5c67a61ab 2024-12-06T10:19:45,467 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5df2647ef06344fbbea96e1bbbbd4db9 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5df2647ef06344fbbea96e1bbbbd4db9 2024-12-06T10:19:45,468 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/0307124cc9eb4fe2828d172194dd5c7e to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/0307124cc9eb4fe2828d172194dd5c7e 2024-12-06T10:19:45,469 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/03788a449d14443fbb68dfd672955f1d to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/03788a449d14443fbb68dfd672955f1d 2024-12-06T10:19:45,470 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fe7ea649e15b47b49d421cd56bd28eda to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/fe7ea649e15b47b49d421cd56bd28eda 2024-12-06T10:19:45,471 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b752fe9f0506465d86ff8015d180eca4 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b752fe9f0506465d86ff8015d180eca4 2024-12-06T10:19:45,471 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/3706886ff78d4d46b5ee9ac3923e3806 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/3706886ff78d4d46b5ee9ac3923e3806 2024-12-06T10:19:45,472 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/983efaefac5d4a3c8af447dc19681c1c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/983efaefac5d4a3c8af447dc19681c1c 2024-12-06T10:19:45,473 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7b8c40d6a1904399a6e16fe07c23133f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7b8c40d6a1904399a6e16fe07c23133f 2024-12-06T10:19:45,474 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f7f2acc3dbf14c6d9dd26a91b4bf1585 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f7f2acc3dbf14c6d9dd26a91b4bf1585 2024-12-06T10:19:45,475 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7dfaa15b15e24fc6bba1c371ab66c237 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7dfaa15b15e24fc6bba1c371ab66c237 2024-12-06T10:19:45,475 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/54e9d59d842e424f8264da2db5d7cb1c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/54e9d59d842e424f8264da2db5d7cb1c 2024-12-06T10:19:45,476 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7779e1792bc4414b819b79931c96f1be to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/7779e1792bc4414b819b79931c96f1be 2024-12-06T10:19:45,477 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b6fecdf8708d4593827e02d3044c1fa8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/b6fecdf8708d4593827e02d3044c1fa8 2024-12-06T10:19:45,478 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f4ff39b1516e42a5948f9048efd65725 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/f4ff39b1516e42a5948f9048efd65725 2024-12-06T10:19:45,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/26da14df03c44b82aad240b4260275fa, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/31b58417249a4cf3951d50eceabff774, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/b57c8b92a4724f54b2c02477712d692b, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/74f58e385dc040fab52121c0ec493686, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9836089d406e4b9bb99c0bab5cc6b064, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5c9538a0decd47a891fb13dc1b4f7663, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/47c087f5b34a4c91ae360ed1b1448e7c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/a2a3446154084f8cab590263e738b47f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/38e4e9970d35450ba2b3e842748e194c, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/706c0fcd9e3f4f269c9ab64704065d08, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/8219808a4bba4ad1955ed274539e28b3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/3ee830ee3c5f4809be4fe4f04ccbd42f, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/d01c348c72094819888d72a376ace071, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f1ef3f0c801147969459c3eb9fcbbca3, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4140854a20a84db993ea0e74a123d79a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/457ae07b6b6c43aa8d8b1c71109242bc, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/30d7cb6499394cbc9cac4be95994f409, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/e42225d60ec447118afbee16a149e5c8, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f44072578a6745c195655e3b536e49b5, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4bfeab0051d74a88a647cf8469ab02a6, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/dcc624091f954ab9b8fcac8a4930b39a, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/292dd1fb3d5d47feb7269c0039520212, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4cdb39fab76546dfa1fe834817772248, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9c5b49e8720e4c27b484d47bcfb7fb9b] to archive 2024-12-06T10:19:45,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:19:45,480 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/26da14df03c44b82aad240b4260275fa to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/26da14df03c44b82aad240b4260275fa 2024-12-06T10:19:45,481 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/31b58417249a4cf3951d50eceabff774 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/31b58417249a4cf3951d50eceabff774 2024-12-06T10:19:45,482 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/b57c8b92a4724f54b2c02477712d692b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/b57c8b92a4724f54b2c02477712d692b 2024-12-06T10:19:45,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/74f58e385dc040fab52121c0ec493686 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/74f58e385dc040fab52121c0ec493686 2024-12-06T10:19:45,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9836089d406e4b9bb99c0bab5cc6b064 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9836089d406e4b9bb99c0bab5cc6b064 2024-12-06T10:19:45,484 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5c9538a0decd47a891fb13dc1b4f7663 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5c9538a0decd47a891fb13dc1b4f7663 2024-12-06T10:19:45,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/47c087f5b34a4c91ae360ed1b1448e7c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/47c087f5b34a4c91ae360ed1b1448e7c 2024-12-06T10:19:45,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/a2a3446154084f8cab590263e738b47f to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/a2a3446154084f8cab590263e738b47f 2024-12-06T10:19:45,486 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/38e4e9970d35450ba2b3e842748e194c to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/38e4e9970d35450ba2b3e842748e194c 2024-12-06T10:19:45,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/706c0fcd9e3f4f269c9ab64704065d08 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/706c0fcd9e3f4f269c9ab64704065d08 2024-12-06T10:19:45,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/8219808a4bba4ad1955ed274539e28b3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/8219808a4bba4ad1955ed274539e28b3 2024-12-06T10:19:45,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/3ee830ee3c5f4809be4fe4f04ccbd42f to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/3ee830ee3c5f4809be4fe4f04ccbd42f 2024-12-06T10:19:45,489 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/d01c348c72094819888d72a376ace071 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/d01c348c72094819888d72a376ace071 2024-12-06T10:19:45,490 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f1ef3f0c801147969459c3eb9fcbbca3 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f1ef3f0c801147969459c3eb9fcbbca3 2024-12-06T10:19:45,490 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4140854a20a84db993ea0e74a123d79a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4140854a20a84db993ea0e74a123d79a 2024-12-06T10:19:45,491 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/457ae07b6b6c43aa8d8b1c71109242bc to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/457ae07b6b6c43aa8d8b1c71109242bc 2024-12-06T10:19:45,492 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/30d7cb6499394cbc9cac4be95994f409 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/30d7cb6499394cbc9cac4be95994f409 2024-12-06T10:19:45,492 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/e42225d60ec447118afbee16a149e5c8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/e42225d60ec447118afbee16a149e5c8 2024-12-06T10:19:45,493 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f44072578a6745c195655e3b536e49b5 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/f44072578a6745c195655e3b536e49b5 2024-12-06T10:19:45,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4bfeab0051d74a88a647cf8469ab02a6 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4bfeab0051d74a88a647cf8469ab02a6 2024-12-06T10:19:45,495 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/dcc624091f954ab9b8fcac8a4930b39a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/dcc624091f954ab9b8fcac8a4930b39a 2024-12-06T10:19:45,495 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/292dd1fb3d5d47feb7269c0039520212 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/292dd1fb3d5d47feb7269c0039520212 2024-12-06T10:19:45,496 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4cdb39fab76546dfa1fe834817772248 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/4cdb39fab76546dfa1fe834817772248 2024-12-06T10:19:45,497 DEBUG [StoreCloser-TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9c5b49e8720e4c27b484d47bcfb7fb9b to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/9c5b49e8720e4c27b484d47bcfb7fb9b 2024-12-06T10:19:45,500 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/recovered.edits/408.seqid, newMaxSeqId=408, maxSeqId=4 2024-12-06T10:19:45,500 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8. 2024-12-06T10:19:45,500 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1635): Region close journal for 7b4c4e64c91c50413b4c0cd97a01bcb8: 2024-12-06T10:19:45,502 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(170): Closed 7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:45,502 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=7b4c4e64c91c50413b4c0cd97a01bcb8, regionState=CLOSED 2024-12-06T10:19:45,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-12-06T10:19:45,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; CloseRegionProcedure 7b4c4e64c91c50413b4c0cd97a01bcb8, server=552d6a33fa09,33397,1733480204743 in 1.4600 sec 2024-12-06T10:19:45,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-06T10:19:45,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b4c4e64c91c50413b4c0cd97a01bcb8, UNASSIGN in 1.4630 sec 2024-12-06T10:19:45,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-06T10:19:45,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4640 sec 2024-12-06T10:19:45,507 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480385507"}]},"ts":"1733480385507"} 2024-12-06T10:19:45,507 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T10:19:45,509 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T10:19:45,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4750 sec 2024-12-06T10:19:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-06T10:19:46,141 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-06T10:19:46,141 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T10:19:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:46,142 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-06T10:19:46,143 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=185, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:46,144 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,145 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C, FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/recovered.edits] 2024-12-06T10:19:46,147 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bb6d7d6c8b01427db7b7061efe33dc00 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/bb6d7d6c8b01427db7b7061efe33dc00 2024-12-06T10:19:46,148 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c75bc4adc075431186a7ce8924002a7a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/c75bc4adc075431186a7ce8924002a7a 2024-12-06T10:19:46,149 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cb1ade7410f14da19b1620dcf888b98d to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/cb1ade7410f14da19b1620dcf888b98d 2024-12-06T10:19:46,150 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/ce3bb0a022a949f7ac266150d5224054 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/A/ce3bb0a022a949f7ac266150d5224054 2024-12-06T10:19:46,151 DEBUG [HFileArchiver-6 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5c364b56f3fa4023867ed169054b7504 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/5c364b56f3fa4023867ed169054b7504 2024-12-06T10:19:46,152 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d229e8c305164f0e8f3f26f41e9b2cbf to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d229e8c305164f0e8f3f26f41e9b2cbf 2024-12-06T10:19:46,153 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d3e317fd0d734c61bdb1582c6d0d4470 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/d3e317fd0d734c61bdb1582c6d0d4470 2024-12-06T10:19:46,154 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de189b5dbbab4133ad414b42a408a4d0 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/B/de189b5dbbab4133ad414b42a408a4d0 2024-12-06T10:19:46,155 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/088fca46a16d4d95bf255819b8d532d1 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/088fca46a16d4d95bf255819b8d532d1 2024-12-06T10:19:46,156 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5113abea86ac43349ddc7bb4c609910a to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/5113abea86ac43349ddc7bb4c609910a 2024-12-06T10:19:46,157 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ac0bd6b6a8004a43ba4003ab9c33c507 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ac0bd6b6a8004a43ba4003ab9c33c507 2024-12-06T10:19:46,157 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ba060d0e0adc49d0aeb8d365f1a72f06 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/C/ba060d0e0adc49d0aeb8d365f1a72f06 2024-12-06T10:19:46,159 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/recovered.edits/408.seqid to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8/recovered.edits/408.seqid 2024-12-06T10:19:46,160 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/default/TestAcidGuarantees/7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,160 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T10:19:46,160 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T10:19:46,161 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-06T10:19:46,163 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061759df2738904bffaafa006c17772b36_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061759df2738904bffaafa006c17772b36_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,164 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120624fec424247c457095ae81b05841bab3_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120624fec424247c457095ae81b05841bab3_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,165 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062fcf5d0990cb422cbcd5d2fea95029e3_7b4c4e64c91c50413b4c0cd97a01bcb8 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062fcf5d0990cb422cbcd5d2fea95029e3_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,166 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206453a5aae8c7a401182c0b2388dea3b69_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206453a5aae8c7a401182c0b2388dea3b69_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,167 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120647f6fc51f8b14d95bf5452b9e925ca83_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120647f6fc51f8b14d95bf5452b9e925ca83_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,167 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206570fe4e2682141f68e3e066ffc1f66c2_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206570fe4e2682141f68e3e066ffc1f66c2_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,168 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206571ee31ebd384460a73f8652ef705cec_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206571ee31ebd384460a73f8652ef705cec_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,169 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065c9fa3a38fed4041a8a2a9e6d25bc178_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065c9fa3a38fed4041a8a2a9e6d25bc178_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,170 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412066049de116b494ccbb42600a872ebfd3e_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412066049de116b494ccbb42600a872ebfd3e_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,171 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120660908016222f4f8596891169b5d91000_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120660908016222f4f8596891169b5d91000_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,171 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067713126145f344549b384c37d304998d_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067713126145f344549b384c37d304998d_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,172 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120678a136f8a2b54321840ce72ba4162ad9_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120678a136f8a2b54321840ce72ba4162ad9_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,173 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206790f2528d9724facb8a45b1c194da347_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206790f2528d9724facb8a45b1c194da347_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,174 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120691079ae49c95431e92d527c52224e5cc_7b4c4e64c91c50413b4c0cd97a01bcb8 to 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120691079ae49c95431e92d527c52224e5cc_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,175 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069ccc8198519a4d418395bffba55ee895_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069ccc8198519a4d418395bffba55ee895_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,175 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b9e2cbb3af304cecb8e2061b2a399023_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b9e2cbb3af304cecb8e2061b2a399023_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,176 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206bfc6daba86e14a0787e51a10f3271b2a_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206bfc6daba86e14a0787e51a10f3271b2a_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,177 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c4329ea499b54a14b339c7d5f7e6911c_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c4329ea499b54a14b339c7d5f7e6911c_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,178 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d1f20d87b88f462db801c821ee8b8cd9_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d1f20d87b88f462db801c821ee8b8cd9_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,178 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e58bcfcce41f4114bcf73f56035a008d_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e58bcfcce41f4114bcf73f56035a008d_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,179 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206f32e33bc489b4f30841b9d396d8f0f44_7b4c4e64c91c50413b4c0cd97a01bcb8 to hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206f32e33bc489b4f30841b9d396d8f0f44_7b4c4e64c91c50413b4c0cd97a01bcb8 2024-12-06T10:19:46,180 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T10:19:46,181 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=185, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:46,183 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T10:19:46,185 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T10:19:46,185 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=185, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:46,185 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T10:19:46,185 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733480386185"}]},"ts":"9223372036854775807"} 2024-12-06T10:19:46,187 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T10:19:46,187 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7b4c4e64c91c50413b4c0cd97a01bcb8, NAME => 'TestAcidGuarantees,,1733480357989.7b4c4e64c91c50413b4c0cd97a01bcb8.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T10:19:46,187 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
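The DEBUG trail above is the storage side of the table drop: HFileArchiver relocates every store file and MOB file into the archive/ tree, deletes the emptied region and mobdir directories, and DeleteTableProcedure then removes the region row and the table-state row from hbase:meta before marking 'TestAcidGuarantees' as deleted. On the client, this sequence is normally triggered by a disable-then-delete call through the Admin API; the snippet below is a minimal sketch of that call, not code taken from the test, and the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn); // a table must be disabled before it can be deleted
            }
            admin.deleteTable(tn);    // blocks until the server-side DeleteTableProcedure finishes
          }
        }
      }
    }

Because deleteTable waits on the procedure, the client-side confirmation only appears once the master reports the procedure finished, which is what the "Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 185 completed" line just below records.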
2024-12-06T10:19:46,187 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733480386187"}]},"ts":"9223372036854775807"} 2024-12-06T10:19:46,188 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T10:19:46,190 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=185, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T10:19:46,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 49 msec 2024-12-06T10:19:46,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-06T10:19:46,244 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-12-06T10:19:46,254 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=238 (was 240), OpenFileDescriptor=452 (was 454), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=427 (was 442), ProcessCount=9 (was 11), AvailableMemoryMB=7187 (was 6151) - AvailableMemoryMB LEAK? - 2024-12-06T10:19:46,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:19:46,254 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:19:46,254 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46873e4f to 127.0.0.1:61610 2024-12-06T10:19:46,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:46,254 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:19:46,254 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=955959901, stopped=false 2024-12-06T10:19:46,255 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,33169,1733480203965 2024-12-06T10:19:46,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:19:46,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:19:46,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:19:46,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:19:46,257 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:19:46,257 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33397-0x10066d47c570001, 
quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:19:46,257 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:19:46,258 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:46,258 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,33397,1733480204743' ***** 2024-12-06T10:19:46,258 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:19:46,258 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:19:46,258 INFO [RS:0;552d6a33fa09:33397 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:19:46,258 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:19:46,258 INFO [RS:0;552d6a33fa09:33397 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T10:19:46,258 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(3579): Received CLOSE for 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:19:46,259 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,33397,1733480204743 2024-12-06T10:19:46,259 DEBUG [RS:0;552d6a33fa09:33397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:46,259 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:19:46,259 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:19:46,259 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:19:46,259 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:19:46,259 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4d3009c066fbf23693b61104c76d0d3b, disabling compactions & flushes 2024-12-06T10:19:46,259 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1603): Online Regions={4d3009c066fbf23693b61104c76d0d3b=hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b., 1588230740=hbase:meta,,1.1588230740} 2024-12-06T10:19:46,259 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 
after waiting 0 ms 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:19:46,259 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4d3009c066fbf23693b61104c76d0d3b 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:19:46,259 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:19:46,259 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:19:46,259 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-06T10:19:46,262 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:19:46,276 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/.tmp/info/1c6d9dc6043c46c382017cf6d4e51d44 is 45, key is default/info:d/1733480209322/Put/seqid=0 2024-12-06T10:19:46,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742557_1733 (size=5037) 2024-12-06T10:19:46,282 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/info/e73ff030214c452e94d315f3d1a348b7 is 143, key is hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b./info:regioninfo/1733480209203/Put/seqid=0 2024-12-06T10:19:46,283 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:19:46,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742558_1734 (size=7725) 2024-12-06T10:19:46,463 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:19:46,663 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4d3009c066fbf23693b61104c76d0d3b 2024-12-06T10:19:46,680 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/.tmp/info/1c6d9dc6043c46c382017cf6d4e51d44 2024-12-06T10:19:46,683 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/.tmp/info/1c6d9dc6043c46c382017cf6d4e51d44 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/info/1c6d9dc6043c46c382017cf6d4e51d44 2024-12-06T10:19:46,685 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/info/e73ff030214c452e94d315f3d1a348b7 2024-12-06T10:19:46,686 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/info/1c6d9dc6043c46c382017cf6d4e51d44, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T10:19:46,686 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4d3009c066fbf23693b61104c76d0d3b in 427ms, sequenceid=6, compaction requested=false 2024-12-06T10:19:46,689 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/namespace/4d3009c066fbf23693b61104c76d0d3b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T10:19:46,690 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 2024-12-06T10:19:46,690 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4d3009c066fbf23693b61104c76d0d3b: 2024-12-06T10:19:46,690 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733480208369.4d3009c066fbf23693b61104c76d0d3b. 
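The namespace-region flush above shows the usual two-step flush commit: DefaultStoreFlusher writes the memstore out to a new HFile under the region's .tmp directory, and HRegionFileSystem then commits that file into the column-family directory (here info/), after which the store adds it and the flush is reported finished. The sketch below illustrates only the commit step with plain Hadoop FileSystem calls; it is not HBase's HRegionFileSystem code and the helper name is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      // Mirrors the "Committing .../.tmp/info/<file> as .../info/<file>" step above:
      // the flushed HFile is moved out of the region's .tmp area into the family
      // directory with a single rename, so readers never see a half-written store file.
      static Path commitStoreFile(FileSystem fs, Path tmpHFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpHFile.getName());
        if (!fs.rename(tmpHFile, dst)) {
          throw new IOException("Failed to commit " + tmpHFile + " to " + dst);
        }
        return dst;
      }
    }

Consistent with a rename-based commit, the committed file in the log keeps the name it was given under .tmp (1c6d9dc6043c46c382017cf6d4e51d44).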
2024-12-06T10:19:46,703 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/rep_barrier/8dba1966744044afbabcc68ae47de6b4 is 102, key is TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df./rep_barrier:/1733480242774/DeleteFamily/seqid=0 2024-12-06T10:19:46,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742559_1735 (size=6025) 2024-12-06T10:19:46,863 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T10:19:47,020 INFO [regionserver/552d6a33fa09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T10:19:47,021 INFO [regionserver/552d6a33fa09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T10:19:47,063 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T10:19:47,107 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/rep_barrier/8dba1966744044afbabcc68ae47de6b4 2024-12-06T10:19:47,125 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/table/690449f4f59b4ad19f0bf42e41f175f7 is 96, key is TestAcidGuarantees,,1733480209515.b58170106b3730174deb9625aeac23df./table:/1733480242774/DeleteFamily/seqid=0 2024-12-06T10:19:47,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742560_1736 (size=5942) 2024-12-06T10:19:47,264 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-06T10:19:47,264 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-06T10:19:47,264 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T10:19:47,464 DEBUG [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T10:19:47,529 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/table/690449f4f59b4ad19f0bf42e41f175f7 2024-12-06T10:19:47,532 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/info/e73ff030214c452e94d315f3d1a348b7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/info/e73ff030214c452e94d315f3d1a348b7 2024-12-06T10:19:47,535 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/info/e73ff030214c452e94d315f3d1a348b7, entries=22, sequenceid=93, filesize=7.5 K 2024-12-06T10:19:47,535 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/rep_barrier/8dba1966744044afbabcc68ae47de6b4 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/rep_barrier/8dba1966744044afbabcc68ae47de6b4 2024-12-06T10:19:47,538 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/rep_barrier/8dba1966744044afbabcc68ae47de6b4, entries=6, sequenceid=93, filesize=5.9 K 2024-12-06T10:19:47,539 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/.tmp/table/690449f4f59b4ad19f0bf42e41f175f7 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/table/690449f4f59b4ad19f0bf42e41f175f7 2024-12-06T10:19:47,542 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/table/690449f4f59b4ad19f0bf42e41f175f7, entries=9, sequenceid=93, filesize=5.8 K 2024-12-06T10:19:47,542 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1283ms, sequenceid=93, compaction requested=false 2024-12-06T10:19:47,546 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-06T10:19:47,546 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:19:47,546 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:19:47,546 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:19:47,546 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T10:19:47,664 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,33397,1733480204743; all regions closed. 
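The 96.seqid file written above, like 9.seqid for the namespace region and 408.seqid for the deleted table's region earlier, is an empty marker whose file name records the region's highest flushed sequence id so that a later open can recover that watermark. Below is a hedged sketch of reading the value back with the Hadoop FileSystem API; it is illustrative only, not HBase's WALSplitUtil implementation, and the helper name is made up.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MaxSeqIdSketch {
      // Returns the largest N among files named "N.seqid" in a region's
      // recovered.edits directory, or -1 if no marker is present.
      static long readMaxSeqId(FileSystem fs, Path recoveredEditsDir) throws IOException {
        long max = -1L;
        for (FileStatus st : fs.listStatus(recoveredEditsDir)) {
          String name = st.getPath().getName();
          if (name.endsWith(".seqid")) {
            max = Math.max(max, Long.parseLong(name.substring(0, name.length() - ".seqid".length())));
          }
        }
        return max;
      }
    }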
2024-12-06T10:19:47,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741834_1010 (size=26050) 2024-12-06T10:19:47,670 DEBUG [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/oldWALs 2024-12-06T10:19:47,670 INFO [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 552d6a33fa09%2C33397%2C1733480204743.meta:.meta(num 1733480208116) 2024-12-06T10:19:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741832_1008 (size=16554787) 2024-12-06T10:19:47,673 DEBUG [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/oldWALs 2024-12-06T10:19:47,673 INFO [RS:0;552d6a33fa09:33397 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 552d6a33fa09%2C33397%2C1733480204743:(num 1733480207145) 2024-12-06T10:19:47,673 DEBUG [RS:0;552d6a33fa09:33397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:47,673 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:19:47,673 INFO [RS:0;552d6a33fa09:33397 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-06T10:19:47,673 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:19:47,674 INFO [RS:0;552d6a33fa09:33397 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33397 2024-12-06T10:19:47,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,33397,1733480204743 2024-12-06T10:19:47,678 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f82d08f3e60@41a6c3de rejected from java.util.concurrent.ThreadPoolExecutor@4a4558e[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-06T10:19:47,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:19:47,679 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,33397,1733480204743] 2024-12-06T10:19:47,679 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,33397,1733480204743; numProcessing=1 2024-12-06T10:19:47,681 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,33397,1733480204743 already deleted, retry=false 2024-12-06T10:19:47,681 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,33397,1733480204743 expired; onlineServers=0 2024-12-06T10:19:47,681 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,33169,1733480203965' ***** 2024-12-06T10:19:47,681 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:19:47,681 DEBUG [M:0;552d6a33fa09:33169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29c8ae9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:19:47,681 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,33169,1733480203965 2024-12-06T10:19:47,681 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,33169,1733480203965; all regions closed. 2024-12-06T10:19:47,681 DEBUG [M:0;552d6a33fa09:33169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:19:47,681 DEBUG [M:0;552d6a33fa09:33169 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:19:47,681 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
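The expiration handling above hinges on the region server's ephemeral znode under /hbase/rs/: the node exists only while the server's ZooKeeper session is alive, so when 552d6a33fa09,33397,1733480204743 closes its connection the node is deleted and the master's RegionServerTracker processes the server as expired. (The RejectedExecutionException stack above appears to be the test client's own watcher executor having already terminated when the NodeDeleted event arrives during teardown.) The standalone snippet below demonstrates the ephemeral-node behaviour with the plain ZooKeeper client; the quorum address is the one from the log but any running ZooKeeper would do, and the znode path is invented for the example.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralNodeSketch {
      public static void main(String[] args) throws Exception {
        // Session-scoped node: it lives exactly as long as this client session.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61610", 30000, event -> { });
        zk.create("/demo-rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        System.out.println("while session open: " + (zk.exists("/demo-rs", false) != null));
        zk.close(); // closing the session removes the ephemeral node, as the RS shutdown does above

        ZooKeeper observer = new ZooKeeper("127.0.0.1:61610", 30000, event -> { });
        System.out.println("after session closed: " + (observer.exists("/demo-rs", false) != null));
        observer.close();
      }
    }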
2024-12-06T10:19:47,681 DEBUG [M:0;552d6a33fa09:33169 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:19:47,681 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480206852 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480206852,5,FailOnTimeoutGroup] 2024-12-06T10:19:47,681 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480206851 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480206851,5,FailOnTimeoutGroup] 2024-12-06T10:19:47,682 INFO [M:0;552d6a33fa09:33169 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:19:47,682 DEBUG [M:0;552d6a33fa09:33169 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:19:47,682 INFO [M:0;552d6a33fa09:33169 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:19:47,682 ERROR [M:0;552d6a33fa09:33169 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-12-06T10:19:47,683 INFO [M:0;552d6a33fa09:33169 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:19:47,683 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:19:47,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:19:47,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:19:47,683 DEBUG [M:0;552d6a33fa09:33169 {}] zookeeper.ZKUtil(347): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:19:47,683 WARN [M:0;552d6a33fa09:33169 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:19:47,683 INFO [M:0;552d6a33fa09:33169 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:19:47,683 INFO [M:0;552d6a33fa09:33169 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:19:47,683 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:19:47,684 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:19:47,684 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T10:19:47,684 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:19:47,684 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:19:47,684 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:19:47,684 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=825.35 KB heapSize=1018.21 KB 2024-12-06T10:19:47,699 DEBUG [M:0;552d6a33fa09:33169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/92dc54fe06494ecfa30a5e63f5201814 is 82, key is hbase:meta,,1/info:regioninfo/1733480208257/Put/seqid=0 2024-12-06T10:19:47,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742561_1737 (size=5672) 2024-12-06T10:19:47,702 INFO [M:0;552d6a33fa09:33169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2390 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/92dc54fe06494ecfa30a5e63f5201814 2024-12-06T10:19:47,724 DEBUG [M:0;552d6a33fa09:33169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9115dd9815744638976bb7287efed5e6 is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x9E/proc:d/1733480361005/Put/seqid=0 2024-12-06T10:19:47,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742562_1738 (size=46112) 2024-12-06T10:19:47,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:19:47,780 INFO [RS:0;552d6a33fa09:33397 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,33397,1733480204743; zookeeper connection closed. 
2024-12-06T10:19:47,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33397-0x10066d47c570001, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:19:47,780 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e3ad341 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e3ad341 2024-12-06T10:19:47,780 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T10:19:48,132 INFO [M:0;552d6a33fa09:33169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=824.80 KB at sequenceid=2390 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9115dd9815744638976bb7287efed5e6 2024-12-06T10:19:48,135 INFO [M:0;552d6a33fa09:33169 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9115dd9815744638976bb7287efed5e6 2024-12-06T10:19:48,150 DEBUG [M:0;552d6a33fa09:33169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/50b7fd8286784d63983e4d7723e52c81 is 69, key is 552d6a33fa09,33397,1733480204743/rs:state/1733480206890/Put/seqid=0 2024-12-06T10:19:48,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073742563_1739 (size=5156) 2024-12-06T10:19:48,554 INFO [M:0;552d6a33fa09:33169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2390 (bloomFilter=true), to=hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/50b7fd8286784d63983e4d7723e52c81 2024-12-06T10:19:48,557 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/92dc54fe06494ecfa30a5e63f5201814 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/92dc54fe06494ecfa30a5e63f5201814 2024-12-06T10:19:48,559 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/92dc54fe06494ecfa30a5e63f5201814, entries=8, sequenceid=2390, filesize=5.5 K 2024-12-06T10:19:48,560 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9115dd9815744638976bb7287efed5e6 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9115dd9815744638976bb7287efed5e6 2024-12-06T10:19:48,562 INFO [M:0;552d6a33fa09:33169 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9115dd9815744638976bb7287efed5e6 2024-12-06T10:19:48,562 INFO 
[M:0;552d6a33fa09:33169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9115dd9815744638976bb7287efed5e6, entries=185, sequenceid=2390, filesize=45.0 K 2024-12-06T10:19:48,563 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/50b7fd8286784d63983e4d7723e52c81 as hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/50b7fd8286784d63983e4d7723e52c81 2024-12-06T10:19:48,565 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40601/user/jenkins/test-data/d8fb5dfd-32ad-0f76-df9c-2040896a1ea4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/50b7fd8286784d63983e4d7723e52c81, entries=1, sequenceid=2390, filesize=5.0 K 2024-12-06T10:19:48,566 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(3040): Finished flush of dataSize ~825.35 KB/845163, heapSize ~1017.91 KB/1042344, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 882ms, sequenceid=2390, compaction requested=false 2024-12-06T10:19:48,568 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:19:48,568 DEBUG [M:0;552d6a33fa09:33169 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:19:48,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37093 is added to blk_1073741830_1006 (size=1001776) 2024-12-06T10:19:48,571 INFO [M:0;552d6a33fa09:33169 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T10:19:48,571 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:19:48,571 INFO [M:0;552d6a33fa09:33169 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33169 2024-12-06T10:19:48,573 DEBUG [M:0;552d6a33fa09:33169 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,33169,1733480203965 already deleted, retry=false 2024-12-06T10:19:48,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:19:48,675 INFO [M:0;552d6a33fa09:33169 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,33169,1733480203965; zookeeper connection closed. 
2024-12-06T10:19:48,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33169-0x10066d47c570000, quorum=127.0.0.1:61610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:19:48,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3054265c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:19:48,682 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65902fec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:19:48,682 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:19:48,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:19:48,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/hadoop.log.dir/,STOPPED} 2024-12-06T10:19:48,685 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:19:48,685 WARN [BP-1797330189-172.17.0.2-1733480200959 heartbeating to localhost/127.0.0.1:40601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:19:48,686 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:19:48,686 WARN [BP-1797330189-172.17.0.2-1733480200959 heartbeating to localhost/127.0.0.1:40601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1797330189-172.17.0.2-1733480200959 (Datanode Uuid 7a5359e7-667f-49eb-befc-2063e29c50e2) service to localhost/127.0.0.1:40601 2024-12-06T10:19:48,688 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/dfs/data/data1/current/BP-1797330189-172.17.0.2-1733480200959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:19:48,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/cluster_b84c5cb5-887e-107d-c16e-13155fed988e/dfs/data/data2/current/BP-1797330189-172.17.0.2-1733480200959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:19:48,689 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:19:48,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:19:48,700 INFO 
[Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:19:48,700 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:19:48,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:19:48,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/919d2d74-7337-66ca-1565-c2037856b4c9/hadoop.log.dir/,STOPPED} 2024-12-06T10:19:48,721 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:19:48,881 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
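Everything from "Shutting down minicluster" to "Minicluster is down" is HBaseTestingUtility teardown: the single region server and the master are stopped, the embedded DataNode and NameNode Jetty endpoints are closed, and the MiniZK cluster is shut down last. The JUnit-style harness below shows where those bracketing log lines come from; the class and method names are illustrative and this is not the actual TestAcidGuaranteesWithAdaptivePolicy source.

    import static org.junit.Assert.assertNotNull;

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUpCluster() throws Exception {
        // Brings up in-process ZooKeeper, a mini HDFS cluster, one master and one region server.
        UTIL.startMiniCluster();
      }

      @Test
      public void clusterIsUsable() throws Exception {
        assertNotNull(UTIL.getAdmin());
      }

      @AfterClass
      public static void tearDownCluster() throws Exception {
        // Drives the shutdown sequence logged above, ending with "Minicluster is down".
        UTIL.shutdownMiniCluster();
      }
    }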